From c06ee5b5de22b98021b3966b82931da50bb30cb8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?=
Date: Fri, 8 Nov 2019 13:42:56 +0100
Subject: [PATCH 1/9] Remove go-querystring from go.mod-extra

---
 cluster-autoscaler/go.mod-extra | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cluster-autoscaler/go.mod-extra b/cluster-autoscaler/go.mod-extra
index de1acf63132..4b367633d71 100644
--- a/cluster-autoscaler/go.mod-extra
+++ b/cluster-autoscaler/go.mod-extra
@@ -3,7 +3,6 @@ go 1.12
 require (
 	github.com/rancher/go-rancher v0.1.0
-	github.com/google/go-querystring v1.0.0
 	github.com/aws/aws-sdk-go v1.23.18
 )

From ba23cf04e83972fca8977e8c799014ca2ea4ae36 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?=
Date: Wed, 30 Oct 2019 22:35:10 +0100
Subject: [PATCH 2/9] Updating vendor against git@github.com:kubernetes/kubernetes.git:master (d87c921a516a5b0387269910cef8551fae62de7f)

Change-Id: If8a00e019477b78fc69fbbe0c498a37694d89094
---
 cluster-autoscaler/go.mod | 314 +-
 cluster-autoscaler/go.sum | 375 +-
 .../2018-10-01/compute/availabilitysets.go | 15 +-
 .../compute/mgmt/2018-10-01/compute/models.go | 17 +
 .../2018-10-01/compute/virtualmachines.go | 2 +-
 .../compute/virtualmachinescalesets.go | 3 +-
 .../compute/virtualmachinescalesetvms.go | 3 +-
 .../2019-07-01/compute/availabilitysets.go | 29 +-
 .../2019-07-01/compute/dedicatedhostgroups.go | 12 +-
 .../mgmt/2019-07-01/compute/dedicatedhosts.go | 10 +-
 .../2019-07-01/compute/diskencryptionsets.go | 599 +
 .../compute/mgmt/2019-07-01/compute/disks.go | 16 +-
 .../2019-07-01/compute/galleryapplications.go | 401 +
 .../compute/galleryapplicationversions.go | 428 +
 .../compute/mgmt/2019-07-01/compute/images.go | 12 +-
 .../mgmt/2019-07-01/compute/loganalytics.go | 4 +-
 .../compute/mgmt/2019-07-01/compute/models.go | 2070 ++-
 .../mgmt/2019-07-01/compute/operations.go | 2 +-
 .../compute/proximityplacementgroups.go | 12 +-
 .../mgmt/2019-07-01/compute/resourceskus.go | 15 +-
 .../mgmt/2019-07-01/compute/snapshots.go | 16 +-
 .../compute/mgmt/2019-07-01/compute/usage.go | 2 +-
 .../compute/virtualmachineextensionimages.go | 6 +-
 .../compute/virtualmachineextensions.go | 10 +-
 .../compute/virtualmachineimages.go | 10 +-
 .../compute/virtualmachineruncommands.go | 4 +-
 .../2019-07-01/compute/virtualmachines.go | 133 +-
 .../virtualmachinescalesetextensions.go | 8 +-
 .../virtualmachinescalesetrollingupgrades.go | 8 +-
 .../compute/virtualmachinescalesets.go | 43 +-
 .../virtualmachinescalesetvmextensions.go | 459 +
 .../compute/virtualmachinescalesetvms.go | 41 +-
 .../2019-07-01/compute/virtualmachinesizes.go | 2 +-
 .../2019-05-01/containerregistry/models.go | 4 +-
 .../mgmt/2019-06-01/network/bastionhosts.go | 80 +
 .../2019-06-01/network/connectionmonitors.go | 82 +
 .../2019-06-01/network/firewallpolicies.go | 582 +
 .../network/firewallpolicyrulegroups.go | 409 +
 .../network/mgmt/2019-06-01/network/models.go | 1658 ++-
 .../2019-06-01/network/privatelinkservices.go | 2 +
 .../mgmt/2019-06-01/network/servicetags.go | 4 +-
 .../mgmt/2019-06-01/network/subnets.go | 82 +
 .../virtualnetworkgatewayconnections.go | 2 +-
 .../network/virtualnetworkgateways.go | 2 +-
 .../mgmt/2017-05-10/resources/deployments.go | 72 +
 .../mgmt/2017-05-10/resources/models.go | 37 +
 .../mgmt/2019-04-01/storage/accounts.go | 74 +-
 .../mgmt/2019-04-01/storage/blobservices.go | 92 +
 .../mgmt/2019-04-01/storage/fileservices.go | 326 +
 .../mgmt/2019-04-01/storage/fileshares.go | 594 +
 .../storage/mgmt/2019-04-01/storage/models.go | 655 +-
 .../azure-sdk-for-go/storage/blockblob.go |
41 + .../Azure/azure-sdk-for-go/version/version.go | 2 +- .../Microsoft/hcsshim/Protobuild.toml | 41 + .../github.com/Microsoft/hcsshim/appveyor.yml | 20 +- .../Microsoft/hcsshim/hcn/hcnendpoint.go | 6 +- .../Microsoft/hcsshim/hcn/hcnglobals.go | 2 +- .../Microsoft/hcsshim/hcn/hcnnetwork.go | 20 +- .../hcsshim/internal/guestrequest/types.go | 100 - .../hcsshim/internal/hcs/callback.go | 90 +- .../Microsoft/hcsshim/internal/hcs/errors.go | 7 + .../Microsoft/hcsshim/internal/hcs/hcs.go | 2 +- .../Microsoft/hcsshim/internal/hcs/process.go | 72 +- .../Microsoft/hcsshim/internal/hcs/system.go | 80 +- .../hcsshim/internal/hcs/waithelper.go | 5 + .../hcsshim/internal/hcs/zsyscall_windows.go | 18 +- .../hcsshim/internal/hns/hnsnetwork.go | 10 +- .../hcsshim/internal/schema2/plan9_share.go | 3 +- .../github.com/Microsoft/hcsshim/vendor.conf | 18 +- .../spec/lib/go/csi/csi.pb.go | 1581 +-- .../github.com/coreos/etcd/raft/progress.go | 284 - .../coreos/etcd/raft/raftpb/raft.proto | 95 - .../github.com/coreos/etcd/raft/util.go | 129 - .../github.com/coreos/go-systemd/dbus/dbus.go | 6 +- .../coreos/go-systemd/dbus/methods.go | 8 +- .../coreos/go-systemd/dbus/subscription.go | 2 +- .../coreos/go-systemd/journal/journal.go | 143 +- .../github.com/coreos/go-systemd/util/util.go | 90 - .../coreos/go-systemd/util/util_cgo.go | 175 - .../github.com/coreos/pkg/dlopen/dlopen.go | 82 - .../coreos/pkg/dlopen/dlopen_example.go | 56 - .../github.com/docker/go-units/MAINTAINERS | 2 +- .../github.com/docker/go-units/circle.yml | 2 +- .../github.com/docker/go-units/duration.go | 2 +- .../github.com/docker/go-units/ulimit.go | 9 +- .../docker/libnetwork/ipvs/constants.go | 20 + .../github.com/docker/libnetwork/ipvs/ipvs.go | 32 +- .../docker/libnetwork/ipvs/netlink.go | 61 + .../vendor/github.com/ghodss/yaml/.travis.yml | 5 +- .../vendor/github.com/ghodss/yaml/yaml.go | 46 +- .../github.com/ghodss/yaml/yaml_go110.go | 14 - .../github.com/go-openapi/jsonpointer/go.mod | 5 +- .../github.com/go-openapi/jsonpointer/go.sum | 6 +- .../go-openapi/jsonpointer/pointer.go | 2 +- .../go-openapi/jsonreference/go.mod | 6 +- .../go-openapi/jsonreference/go.sum | 8 + .../github.com/go-openapi/spec/bindata.go | 8 +- .../vendor/github.com/go-openapi/spec/go.mod | 7 +- .../vendor/github.com/go-openapi/spec/go.sum | 8 + .../go-openapi/spec/schema_loader.go | 1 + .../github.com/go-openapi/swag/.gitignore | 1 + .../github.com/go-openapi/swag/convert.go | 7 +- .../vendor/github.com/go-openapi/swag/json.go | 2 +- .../vendor/github.com/go-openapi/swag/util.go | 21 +- .../vendor/github.com/go-openapi/swag/yaml.go | 47 +- .../vendor/github.com/godbus/dbus/.travis.yml | 8 +- .../github.com/godbus/dbus/README.markdown | 2 +- .../vendor/github.com/godbus/dbus/auth.go | 1 - .../github.com/godbus/dbus/auth_anonymous.go | 16 + .../vendor/github.com/godbus/dbus/call.go | 26 +- .../vendor/github.com/godbus/dbus/conn.go | 616 +- .../github.com/godbus/dbus/conn_darwin.go | 4 + .../github.com/godbus/dbus/conn_other.go | 63 +- .../github.com/godbus/dbus/conn_unix.go | 18 + .../github.com/godbus/dbus/conn_windows.go | 15 + .../vendor/github.com/godbus/dbus/decoder.go | 9 +- .../github.com/godbus/dbus/default_handler.go | 52 +- .../vendor/github.com/godbus/dbus/export.go | 17 +- .../vendor/github.com/godbus/dbus/go.mod | 1 + .../vendor/github.com/godbus/dbus/object.go | 118 +- .../godbus/dbus/server_interfaces.go | 10 + .../godbus/dbus/transport_generic.go | 2 +- .../godbus/dbus/transport_nonce_tcp.go | 39 + .../github.com/godbus/dbus/transport_unix.go | 30 
+- .../golang/protobuf/proto/properties.go | 5 +- .../client/api/go-client/block_volume.go | 5 +- .../heketi/client/api/go-client/client.go | 22 +- .../heketi/client/api/go-client/device.go | 9 +- .../heketi/client/api/go-client/node.go | 7 +- .../heketi/client/api/go-client/operations.go | 3 +- .../heketi/client/api/go-client/volume.go | 11 +- .../heketi/heketi/pkg/glusterfs/api/types.go | 4 +- .../github.com/json-iterator/go/iter.go | 27 + .../github.com/json-iterator/go/iter_array.go | 10 +- .../json-iterator/go/iter_object.go | 24 +- .../json-iterator/go/iter_skip_sloppy.go | 19 + .../github.com/json-iterator/go/reflect.go | 5 + .../json-iterator/go/reflect_extension.go | 2 +- .../json-iterator/go/reflect_map.go | 4 + .../json-iterator/go/reflect_marshaler.go | 12 +- .../go/reflect_struct_decoder.go | 44 + .../mindprince/gonvml/.travis.gofmt.sh | 7 + .../uuid => mindprince/gonvml}/.travis.yml | 5 +- .../github.com/mindprince/gonvml/Makefile | 3 + .../github.com/mindprince/gonvml/bindings.go | 95 +- .../mindprince/gonvml/bindings_nocgo.go | 115 + .../github.com/mrunalp/fileutils/fileutils.go | 9 +- .../uuid => munnerz/goautoneg}/LICENSE | 26 +- .../github.com/munnerz/goautoneg/autoneg.go | 103 +- .../runc/libcontainer/README.md | 1 + .../runc/libcontainer/apparmor/apparmor.go | 10 +- .../runc/libcontainer/capabilities_linux.go | 6 +- .../runc/libcontainer/cgroups/fs/apply_raw.go | 35 +- .../runc/libcontainer/cgroups/fs/cpu_v2.go | 92 + .../runc/libcontainer/cgroups/fs/cpuset_v2.go | 159 + .../libcontainer/cgroups/fs/freezer_v2.go | 74 + .../runc/libcontainer/cgroups/fs/io_v2.go | 191 + .../runc/libcontainer/cgroups/fs/memory_v2.go | 164 + .../runc/libcontainer/cgroups/fs/pids_v2.go | 107 + .../runc/libcontainer/cgroups/fs/utils.go | 7 +- .../cgroups/systemd/apply_systemd.go | 140 +- .../cgroups/systemd/unified_hierarchy.go | 329 + .../runc/libcontainer/cgroups/utils.go | 58 +- .../runc/libcontainer/configs/blkio_device.go | 5 + .../runc/libcontainer/configs/cgroup_linux.go | 8 + ...group_windows.go => cgroup_unsupported.go} | 2 + .../runc/libcontainer/configs/config.go | 1 + .../runc/libcontainer/container_linux.go | 15 +- .../runc/libcontainer/rootfs_linux.go | 186 +- .../runc/libcontainer/seccomp/config.go | 1 + .../libcontainer/seccomp/seccomp_linux.go | 3 + .../libcontainer/system/syscall_linux_64.go | 2 +- .../runc/libcontainer/utils/utils_unix.go | 44 +- .../selinux/go-selinux/label/label_selinux.go | 18 +- .../selinux/go-selinux/selinux_linux.go | 33 + .../selinux/go-selinux/selinux_stub.go | 13 + .../github.com/pborman/uuid/CONTRIBUTING.md | 10 - .../github.com/pborman/uuid/CONTRIBUTORS | 1 - .../vendor/github.com/pborman/uuid/README.md | 15 - .../vendor/github.com/pborman/uuid/dce.go | 84 - .../vendor/github.com/pborman/uuid/doc.go | 13 - .../vendor/github.com/pborman/uuid/go.mod | 3 - .../vendor/github.com/pborman/uuid/go.sum | 2 - .../vendor/github.com/pborman/uuid/hash.go | 53 - .../vendor/github.com/pborman/uuid/marshal.go | 85 - .../vendor/github.com/pborman/uuid/node.go | 50 - .../vendor/github.com/pborman/uuid/sql.go | 68 - .../vendor/github.com/pborman/uuid/time.go | 57 - .../vendor/github.com/pborman/uuid/util.go | 32 - .../vendor/github.com/pborman/uuid/uuid.go | 162 - .../github.com/pborman/uuid/version1.go | 23 - .../github.com/pborman/uuid/version4.go | 26 - .../vendor/github.com/pkg/errors/.travis.yml | 12 +- .../vendor/github.com/pkg/errors/README.md | 4 +- .../vendor/github.com/pkg/errors/errors.go | 43 +- .../vendor/github.com/pkg/errors/stack.go | 51 +- 
.../client_golang/prometheus/build_info.go | 29 + .../prometheus/build_info_pre_1.12.go} | 19 +- .../client_golang/prometheus/collector.go | 2 +- .../client_golang/prometheus/doc.go | 7 +- .../client_golang/prometheus/go_collector.go | 121 +- .../client_golang/prometheus/histogram.go | 118 +- .../client_golang/prometheus/http.go | 504 - .../prometheus/process_collector.go | 61 +- .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 112 + .../prometheus/promhttp/delegator.go | 160 +- .../prometheus/promhttp/delegator_1_8.go | 181 - .../prometheus/promhttp/delegator_pre_1_8.go | 44 - .../client_golang/prometheus/promhttp/http.go | 48 +- .../prometheus/promhttp/instrument_client.go | 122 + .../promhttp/instrument_client_1_8.go | 144 - .../client_golang/prometheus/registry.go | 16 +- .../client_golang/prometheus/summary.go | 152 +- .../client_golang/prometheus/wrap.go | 21 + .../bitbucket.org/ww/goautoneg/autoneg.go | 6 +- .../prometheus/common/model/metric.go | 1 - .../prometheus/common/model/time.go | 8 +- .../prometheus/procfs/.golangci.yml | 6 + .../prometheus/procfs/MAINTAINERS.md | 3 +- .../github.com/prometheus/procfs/Makefile | 11 +- .../prometheus/procfs/Makefile.common | 141 +- .../github.com/prometheus/procfs/README.md | 44 +- .../github.com/prometheus/procfs/buddyinfo.go | 14 +- .../prometheus/procfs/fixtures.ttar | 1572 ++- .../vendor/github.com/prometheus/procfs/fs.go | 71 +- .../github.com/prometheus/procfs/go.mod | 2 + .../github.com/prometheus/procfs/go.sum | 2 + .../prometheus/procfs/internal/fs/fs.go | 52 + .../prometheus/procfs/internal/util/parse.go | 59 - .../procfs/internal/util/sysreadfile_linux.go | 45 - .../github.com/prometheus/procfs/ipvs.go | 32 +- .../github.com/prometheus/procfs/mdstat.go | 74 +- .../prometheus/procfs/mountstats.go | 43 +- .../github.com/prometheus/procfs/net_dev.go | 38 +- .../github.com/prometheus/procfs/net_unix.go | 275 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 - .../github.com/prometheus/procfs/nfs/parse.go | 317 - .../prometheus/procfs/nfs/parse_nfs.go | 67 - .../prometheus/procfs/nfs/parse_nfsd.go | 89 - .../github.com/prometheus/procfs/proc.go | 27 +- .../github.com/prometheus/procfs/proc_io.go | 4 +- .../prometheus/procfs/proc_limits.go | 7 + .../github.com/prometheus/procfs/proc_ns.go | 4 +- .../github.com/prometheus/procfs/proc_psi.go | 101 + .../github.com/prometheus/procfs/proc_stat.go | 20 +- .../prometheus/procfs/proc_status.go | 162 + .../github.com/prometheus/procfs/stat.go | 38 +- .../vendor/github.com/prometheus/procfs/ttar | 42 +- .../github.com/prometheus/procfs/xfrm.go | 2 +- .../github.com/prometheus/procfs/xfs/parse.go | 330 - .../github.com/prometheus/procfs/xfs/xfs.go | 163 - .../vendor/github.com/spf13/pflag/.travis.yml | 7 +- .../vendor/github.com/spf13/pflag/README.md | 4 +- .../github.com/spf13/pflag/bool_slice.go | 38 + .../vendor/github.com/spf13/pflag/count.go | 4 +- .../github.com/spf13/pflag/duration_slice.go | 38 + .../vendor/github.com/spf13/pflag/flag.go | 16 +- .../github.com/spf13/pflag/float32_slice.go | 174 + .../github.com/spf13/pflag/float64_slice.go | 166 + .../vendor/github.com/spf13/pflag/go.mod | 3 + .../vendor/github.com/spf13/pflag/go.sum | 0 .../github.com/spf13/pflag/int32_slice.go | 174 + .../github.com/spf13/pflag/int64_slice.go | 166 + .../github.com/spf13/pflag/int_slice.go | 30 + .../vendor/github.com/spf13/pflag/ip_slice.go | 40 +- .../github.com/spf13/pflag/string_array.go | 26 + .../github.com/spf13/pflag/string_slice.go | 22 +- 
.../github.com/spf13/pflag/string_to_int64.go | 149 + .../github.com/spf13/pflag/uint_slice.go | 42 + .../testify/assert/assertion_format.go | 82 + .../testify/assert/assertion_forward.go | 164 + .../testify/assert/assertion_order.go | 309 + .../stretchr/testify/assert/assertions.go | 96 +- .../github.com/stretchr/testify/mock/mock.go | 20 +- .../stretchr/testify/require/require.go | 750 +- .../stretchr/testify/require/require.go.tmpl | 2 +- .../testify/require/require_forward.go | 164 + .../gocapability/capability/capability.go | 71 +- .../capability/capability_linux.go | 16 +- .../gocapability/capability/syscall_linux.go | 2 +- .../vishvananda/netlink/CHANGELOG.md | 5 + .../github.com/vishvananda/netlink/Makefile | 5 +- .../github.com/vishvananda/netlink/README.md | 1 + .../vishvananda/netlink/addr_linux.go | 94 +- .../vishvananda/netlink/bpf_linux.go | 83 +- .../vishvananda/netlink/bridge_linux.go | 24 +- .../vishvananda/netlink/class_linux.go | 25 +- .../vishvananda/netlink/conntrack_linux.go | 14 +- .../github.com/vishvananda/netlink/filter.go | 17 +- .../vishvananda/netlink/filter_linux.go | 56 +- .../github.com/vishvananda/netlink/fou.go | 21 + .../vishvananda/netlink/fou_linux.go | 215 + .../vishvananda/netlink/fou_unspecified.go | 15 + .../vishvananda/netlink/genetlink_linux.go | 7 +- .../vishvananda/netlink/gtp_linux.go | 15 +- .../vishvananda/netlink/handle_linux.go | 18 +- .../vishvananda/netlink/handle_unspecified.go | 36 + .../vishvananda/netlink/ioctl_linux.go | 98 + .../github.com/vishvananda/netlink/link.go | 123 +- .../vishvananda/netlink/link_linux.go | 748 +- .../github.com/vishvananda/netlink/neigh.go | 2 + .../vishvananda/netlink/neigh_linux.go | 34 +- .../vishvananda/netlink/nl/addr_linux.go | 13 +- .../vishvananda/netlink/nl/link_linux.go | 42 +- .../vishvananda/netlink/nl/nl_linux.go | 236 +- .../vishvananda/netlink/nl/route_linux.go | 39 +- .../vishvananda/netlink/nl/seg6_linux.go | 111 + .../vishvananda/netlink/nl/syscall.go | 10 + .../vishvananda/netlink/nl/tc_linux.go | 35 + .../vishvananda/netlink/protinfo_linux.go | 9 +- .../github.com/vishvananda/netlink/qdisc.go | 60 + .../vishvananda/netlink/qdisc_linux.go | 140 +- .../github.com/vishvananda/netlink/route.go | 2 + .../vishvananda/netlink/route_linux.go | 245 +- .../github.com/vishvananda/netlink/rule.go | 1 + .../vishvananda/netlink/rule_linux.go | 50 +- .../vishvananda/netlink/socket_linux.go | 8 +- .../github.com/vishvananda/netlink/xfrm.go | 13 +- .../vishvananda/netlink/xfrm_monitor_linux.go | 7 +- .../vishvananda/netlink/xfrm_policy_linux.go | 19 +- .../vishvananda/netlink/xfrm_state.go | 27 +- .../vishvananda/netlink/xfrm_state_linux.go | 39 +- .../github.com/vmware/govmomi/find/finder.go | 6 + .../vmware/govmomi/vim25/soap/client.go | 1 + .../coreos => go.etcd.io}/etcd/LICENSE | 0 .../coreos => go.etcd.io}/etcd/NOTICE | 0 .../etcd/auth/authpb/auth.pb.go | 599 +- .../etcd/auth/authpb/auth.proto | 5 + .../vendor/go.etcd.io/etcd/clientv3/README.md | 85 + .../etcd/clientv3/auth.go | 53 +- .../etcd/clientv3/balancer/balancer.go | 4 +- .../balancer/connectivity/connectivity.go | 0 .../etcd/clientv3/balancer/picker/doc.go | 0 .../etcd/clientv3/balancer/picker/err.go | 0 .../etcd/clientv3/balancer/picker/picker.go | 0 .../balancer/picker/roundrobin_balanced.go | 0 .../balancer/resolver/endpoint/endpoint.go | 23 +- .../etcd/clientv3/balancer/utils.go | 0 .../etcd/clientv3/client.go | 65 +- .../etcd/clientv3/cluster.go | 43 +- .../etcd/clientv3/compact_op.go | 2 +- .../etcd/clientv3/compare.go | 2 +- 
.../etcd/clientv3/config.go | 10 +- .../etcd/clientv3/credentials/credentials.go | 66 +- .../etcd/clientv3/doc.go | 19 +- .../coreos => go.etcd.io}/etcd/clientv3/kv.go | 2 +- .../etcd/clientv3/lease.go | 44 +- .../etcd/clientv3/logger.go | 2 +- .../etcd/clientv3/maintenance.go | 2 +- .../coreos => go.etcd.io}/etcd/clientv3/op.go | 70 +- .../etcd/clientv3/options.go | 0 .../etcd/clientv3/retry.go | 8 +- .../etcd/clientv3/retry_interceptor.go | 2 +- .../etcd/clientv3/sort.go | 0 .../etcd/clientv3/txn.go | 2 +- .../etcd/clientv3/utils.go | 0 .../etcd/clientv3/watch.go | 6 +- .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 0 .../etcdserver/api/v3rpc/rpctypes/error.go | 18 + .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 0 .../api/v3rpc/rpctypes/metadatafields.go | 0 .../etcdserver/etcdserverpb/etcdserver.pb.go | 571 +- .../etcdserver/etcdserverpb/etcdserver.proto | 0 .../etcdserverpb/raft_internal.pb.go | 1266 +- .../etcdserverpb/raft_internal.proto | 3 +- .../etcdserverpb/raft_internal_stringer.go | 0 .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 11360 +++++----------- .../etcd/etcdserver/etcdserverpb/rpc.proto | 179 +- .../etcd/mvcc/mvccpb/kv.pb.go | 362 +- .../etcd/mvcc/mvccpb/kv.proto | 0 .../etcd/pkg/logutil/discard_logger.go | 0 .../etcd/pkg/logutil/doc.go | 0 .../etcd/pkg/logutil/log_level.go | 0 .../etcd/pkg/logutil/logger.go | 0 .../etcd/pkg/logutil/merge_logger.go | 0 .../etcd/pkg/logutil/package_logger.go | 2 +- .../etcd/pkg/logutil/zap.go | 10 +- .../etcd/pkg/logutil/zap_grpc.go | 0 .../etcd/pkg/logutil/zap_journal.go | 2 +- .../etcd/pkg/logutil/zap_raft.go | 2 +- .../etcd/pkg/systemd/doc.go | 0 .../etcd/pkg/systemd/journal.go | 0 .../etcd/pkg/tlsutil/cipher_suites.go | 0 .../etcd/pkg/tlsutil/doc.go | 0 .../etcd/pkg/tlsutil/tlsutil.go | 1 + .../etcd/pkg/transport/doc.go | 0 .../etcd/pkg/transport/keepalive_listener.go | 0 .../etcd/pkg/transport/limit_listen.go | 0 .../etcd/pkg/transport/listener.go | 214 +- .../etcd/pkg/transport/listener_tls.go | 0 .../etcd/pkg/transport/timeout_conn.go | 0 .../etcd/pkg/transport/timeout_dialer.go | 0 .../etcd/pkg/transport/timeout_listener.go | 2 +- .../etcd/pkg/transport/timeout_transport.go | 0 .../etcd/pkg/transport/tls.go | 0 .../etcd/pkg/transport/transport.go | 0 .../etcd/pkg/transport/unix_listener.go | 0 .../etcd/pkg/types/doc.go | 0 .../etcd/pkg/types/id.go | 4 +- .../etcd/pkg/types/set.go | 17 + .../etcd/pkg/types/slice.go | 0 .../etcd/pkg/types/urls.go | 0 .../etcd/pkg/types/urlsmap.go | 0 .../vendor/go.etcd.io/etcd/raft/OWNERS | 19 + .../coreos => go.etcd.io}/etcd/raft/README.md | 17 +- .../vendor/go.etcd.io/etcd/raft/bootstrap.go | 80 + .../etcd/raft/confchange/confchange.go | 425 + .../etcd/raft/confchange/restore.go | 155 + .../coreos => go.etcd.io}/etcd/raft/design.md | 0 .../coreos => go.etcd.io}/etcd/raft/doc.go | 12 +- .../coreos => go.etcd.io}/etcd/raft/log.go | 30 +- .../etcd/raft/log_unstable.go | 10 +- .../coreos => go.etcd.io}/etcd/raft/logger.go | 8 +- .../coreos => go.etcd.io}/etcd/raft/node.go | 313 +- .../go.etcd.io/etcd/raft/quorum/joint.go | 75 + .../go.etcd.io/etcd/raft/quorum/majority.go | 210 + .../go.etcd.io/etcd/raft/quorum/quorum.go | 58 + .../etcd/raft/quorum/voteresult_string.go | 26 + .../coreos => go.etcd.io}/etcd/raft/raft.go | 925 +- .../go.etcd.io/etcd/raft/raftpb/confchange.go | 170 + .../go.etcd.io/etcd/raft/raftpb/confstate.go | 45 + .../etcd/raft/raftpb/raft.pb.go | 2007 +-- .../go.etcd.io/etcd/raft/raftpb/raft.proto | 177 + .../etcd/raft/rawnode.go | 193 +- .../etcd/raft/read_only.go | 27 +- 
.../coreos => go.etcd.io}/etcd/raft/status.go | 52 +- .../etcd/raft/storage.go | 4 +- .../go.etcd.io/etcd/raft/tracker/inflights.go | 132 + .../go.etcd.io/etcd/raft/tracker/progress.go | 259 + .../go.etcd.io/etcd/raft/tracker/state.go | 42 + .../go.etcd.io/etcd/raft/tracker/tracker.go | 288 + .../vendor/go.etcd.io/etcd/raft/util.go | 233 + .../vendor/go.uber.org/atomic/.travis.yml | 8 +- .../vendor/go.uber.org/atomic/README.md | 2 +- .../vendor/go.uber.org/multierr/.travis.yml | 2 +- .../vendor/go.uber.org/multierr/error.go | 2 +- .../vendor/go.uber.org/zap/.travis.yml | 4 +- .../vendor/go.uber.org/zap/CHANGELOG.md | 22 + .../vendor/go.uber.org/zap/Makefile | 4 +- .../vendor/go.uber.org/zap/global.go | 1 - .../{atomic/error.go => zap/global_go112.go} | 39 +- .../vendor/go.uber.org/zap/global_prego112.go | 26 + .../vendor/go.uber.org/zap/zapcore/field.go | 13 +- .../go.uber.org/zap/zapcore/json_encoder.go | 3 + .../go.uber.org/zap/zapcore/memory_encoder.go | 2 +- .../golang.org/x/crypto/ed25519/ed25519.go | 5 + .../x/crypto/ed25519/ed25519_go113.go | 73 + .../golang.org/x/crypto/pkcs12/pkcs12.go | 7 +- .../x/crypto/ssh/terminal/terminal.go | 71 +- .../golang.org/x/crypto/ssh/terminal/util.go | 4 +- .../x/crypto/ssh/terminal/util_aix.go | 12 + .../x/crypto/ssh/terminal/util_plan9.go | 2 +- .../x/crypto/ssh/terminal/util_solaris.go | 2 +- .../x/crypto/ssh/terminal/util_windows.go | 8 +- .../golang.org/x/net/bpf/vm_instructions.go | 3 +- .../vendor/golang.org/x/net/html/parse.go | 130 +- .../vendor/golang.org/x/net/http2/frame.go | 2 +- .../vendor/golang.org/x/net/http2/server.go | 44 +- .../golang.org/x/net/http2/transport.go | 19 +- .../x/net/http2/writesched_random.go | 9 +- .../x/net/idna/{idna.go => idna10.0.0.go} | 8 +- .../vendor/golang.org/x/net/idna/idna9.0.0.go | 682 + .../x/net/idna/{tables.go => tables10.0.0.go} | 6 +- .../golang.org/x/net/idna/tables11.0.0.go | 4653 +++++++ .../golang.org/x/net/idna/tables9.0.0.go | 4486 ++++++ .../x/net/internal/socket/cmsghdr.go | 2 +- .../x/net/internal/socket/cmsghdr_bsd.go | 2 +- .../internal/socket/cmsghdr_linux_64bit.go | 2 +- .../x/net/internal/socket/cmsghdr_stub.go | 2 +- .../x/net/internal/socket/defs_aix.go | 39 + .../x/net/internal/socket/defs_darwin.go | 8 - .../x/net/internal/socket/defs_dragonfly.go | 8 - .../x/net/internal/socket/defs_freebsd.go | 8 - .../x/net/internal/socket/defs_linux.go | 8 - .../x/net/internal/socket/defs_netbsd.go | 8 - .../x/net/internal/socket/defs_openbsd.go | 8 - .../x/net/internal/socket/defs_solaris.go | 8 - .../x/net/internal/socket/error_unix.go | 2 +- .../x/net/internal/socket/iovec_64bit.go | 4 +- .../x/net/internal/socket/iovec_stub.go | 2 +- .../x/net/internal/socket/mmsghdr_stub.go | 2 +- .../x/net/internal/socket/mmsghdr_unix.go | 2 +- .../x/net/internal/socket/msghdr_bsd.go | 2 +- .../x/net/internal/socket/msghdr_bsdvar.go | 2 +- .../net/internal/socket/msghdr_linux_64bit.go | 2 +- .../x/net/internal/socket/msghdr_stub.go | 2 +- .../x/net/internal/socket/rawconn.go | 2 - .../x/net/internal/socket/rawconn_mmsg.go | 1 - .../x/net/internal/socket/rawconn_msg.go | 3 +- .../x/net/internal/socket/rawconn_nommsg.go | 7 +- .../x/net/internal/socket/rawconn_nomsg.go | 9 +- .../x/net/internal/socket/rawconn_stub.go | 25 - .../x/net/internal/socket/reflect.go | 62 - .../x/net/internal/socket/socket.go | 3 + .../golang.org/x/net/internal/socket/sys.go | 2 +- .../x/net/internal/socket/sys_bsd.go | 8 +- .../x/net/internal/socket/sys_bsdvar.go | 7 +- .../x/net/internal/socket/sys_const_unix.go | 17 + 
.../{sys_go1_12_darwin.go => sys_linkname.go} | 2 +- .../net/internal/socket/sys_linux_riscv64.go | 12 + .../x/net/internal/socket/sys_posix.go | 9 +- .../x/net/internal/socket/sys_solaris.go | 5 +- .../x/net/internal/socket/sys_stub.go | 17 +- .../x/net/internal/socket/sys_windows.go | 19 +- .../x/net/internal/socket/zsys_aix_ppc64.go | 61 + .../x/net/internal/socket/zsys_darwin_386.go | 10 +- .../net/internal/socket/zsys_darwin_amd64.go | 10 +- .../x/net/internal/socket/zsys_darwin_arm.go | 10 +- .../net/internal/socket/zsys_darwin_arm64.go | 10 +- .../internal/socket/zsys_dragonfly_amd64.go | 10 +- .../x/net/internal/socket/zsys_freebsd_386.go | 10 +- .../net/internal/socket/zsys_freebsd_amd64.go | 10 +- .../x/net/internal/socket/zsys_freebsd_arm.go | 10 +- .../net/internal/socket/zsys_freebsd_arm64.go | 53 + .../x/net/internal/socket/zsys_linux_386.go | 10 +- .../x/net/internal/socket/zsys_linux_amd64.go | 10 +- .../x/net/internal/socket/zsys_linux_arm.go | 10 +- .../x/net/internal/socket/zsys_linux_arm64.go | 10 +- .../x/net/internal/socket/zsys_linux_mips.go | 10 +- .../net/internal/socket/zsys_linux_mips64.go | 10 +- .../internal/socket/zsys_linux_mips64le.go | 10 +- .../net/internal/socket/zsys_linux_mipsle.go | 10 +- .../x/net/internal/socket/zsys_linux_ppc64.go | 10 +- .../net/internal/socket/zsys_linux_ppc64le.go | 10 +- .../net/internal/socket/zsys_linux_riscv64.go | 59 + .../x/net/internal/socket/zsys_linux_s390x.go | 10 +- .../x/net/internal/socket/zsys_netbsd_386.go | 10 +- .../net/internal/socket/zsys_netbsd_amd64.go | 10 +- .../x/net/internal/socket/zsys_netbsd_arm.go | 10 +- .../net/internal/socket/zsys_netbsd_arm64.go | 60 + .../x/net/internal/socket/zsys_openbsd_386.go | 10 +- .../net/internal/socket/zsys_openbsd_amd64.go | 10 +- .../x/net/internal/socket/zsys_openbsd_arm.go | 10 +- .../net/internal/socket/zsys_openbsd_arm64.go | 53 + .../net/internal/socket/zsys_solaris_amd64.go | 10 +- .../vendor/golang.org/x/net/ipv4/batch.go | 8 +- .../golang.org/x/net/ipv4/control_bsd.go | 2 +- .../golang.org/x/net/ipv4/control_stub.go | 4 +- .../golang.org/x/net/ipv4/control_unix.go | 2 +- .../golang.org/x/net/ipv4/control_windows.go | 8 +- .../vendor/golang.org/x/net/ipv4/defs_aix.go | 39 + .../vendor/golang.org/x/net/ipv4/dgramopt.go | 30 +- .../vendor/golang.org/x/net/ipv4/doc.go | 5 +- .../vendor/golang.org/x/net/ipv4/endpoint.go | 2 +- .../golang.org/x/net/ipv4/genericopt.go | 8 +- .../vendor/golang.org/x/net/ipv4/header.go | 11 +- .../vendor/golang.org/x/net/ipv4/helper.go | 24 +- .../vendor/golang.org/x/net/ipv4/packet.go | 53 +- .../golang.org/x/net/ipv4/packet_go1_8.go | 56 - .../golang.org/x/net/ipv4/packet_go1_9.go | 67 - .../golang.org/x/net/ipv4/payload_cmsg.go | 59 +- .../x/net/ipv4/payload_cmsg_go1_8.go | 59 - .../x/net/ipv4/payload_cmsg_go1_9.go | 67 - .../golang.org/x/net/ipv4/payload_nocmsg.go | 2 +- .../golang.org/x/net/ipv4/sockopt_posix.go | 6 +- .../golang.org/x/net/ipv4/sockopt_stub.go | 16 +- .../vendor/golang.org/x/net/ipv4/sys_aix.go | 38 + .../golang.org/x/net/ipv4/sys_asmreq.go | 2 +- .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 8 +- .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 4 +- .../golang.org/x/net/ipv4/sys_bpf_stub.go | 2 +- .../golang.org/x/net/ipv4/sys_freebsd.go | 2 +- .../golang.org/x/net/ipv4/sys_ssmreq.go | 6 +- .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 4 +- .../vendor/golang.org/x/net/ipv4/sys_stub.go | 2 +- .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 33 + .../golang.org/x/net/ipv4/zsys_darwin.go | 2 +- 
.../golang.org/x/net/ipv4/zsys_dragonfly.go | 2 +- .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 2 +- .../x/net/ipv4/zsys_freebsd_amd64.go | 2 +- .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_386.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_arm.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_mips.go | 2 +- .../x/net/ipv4/zsys_linux_mips64.go | 2 +- .../x/net/ipv4/zsys_linux_mips64le.go | 2 +- .../x/net/ipv4/zsys_linux_mipsle.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 2 +- .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 2 +- .../x/net/ipv4/zsys_linux_ppc64le.go | 2 +- .../x/net/ipv4/zsys_linux_riscv64.go | 151 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 2 +- .../golang.org/x/net/ipv4/zsys_netbsd.go | 2 +- .../golang.org/x/net/ipv4/zsys_openbsd.go | 2 +- .../golang.org/x/net/ipv4/zsys_solaris.go | 2 +- .../vendor/golang.org/x/net/ipv6/batch.go | 2 - .../x/net/ipv6/control_rfc3542_unix.go | 2 +- .../golang.org/x/net/ipv6/control_stub.go | 4 +- .../golang.org/x/net/ipv6/control_unix.go | 2 +- .../golang.org/x/net/ipv6/control_windows.go | 8 +- .../vendor/golang.org/x/net/ipv6/defs_aix.go | 82 + .../vendor/golang.org/x/net/ipv6/dgramopt.go | 34 +- .../vendor/golang.org/x/net/ipv6/doc.go | 3 +- .../vendor/golang.org/x/net/ipv6/endpoint.go | 2 +- .../golang.org/x/net/ipv6/genericopt.go | 8 +- .../vendor/golang.org/x/net/ipv6/helper.go | 3 +- .../vendor/golang.org/x/net/ipv6/icmp_bsd.go | 2 +- .../vendor/golang.org/x/net/ipv6/icmp_stub.go | 2 +- .../golang.org/x/net/ipv6/payload_cmsg.go | 46 +- .../x/net/ipv6/payload_cmsg_go1_8.go | 55 - .../x/net/ipv6/payload_cmsg_go1_9.go | 57 - .../golang.org/x/net/ipv6/payload_nocmsg.go | 2 +- .../golang.org/x/net/ipv6/sockopt_posix.go | 12 +- .../golang.org/x/net/ipv6/sockopt_stub.go | 18 +- .../vendor/golang.org/x/net/ipv6/sys_aix.go | 77 + .../golang.org/x/net/ipv6/sys_asmreq.go | 2 +- .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 4 +- .../golang.org/x/net/ipv6/sys_bpf_stub.go | 2 +- .../golang.org/x/net/ipv6/sys_freebsd.go | 2 +- .../golang.org/x/net/ipv6/sys_ssmreq.go | 8 +- .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 6 +- .../vendor/golang.org/x/net/ipv6/sys_stub.go | 2 +- .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 103 + .../golang.org/x/net/ipv6/zsys_darwin.go | 2 +- .../golang.org/x/net/ipv6/zsys_dragonfly.go | 2 +- .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 2 +- .../x/net/ipv6/zsys_freebsd_amd64.go | 2 +- .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_386.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_arm.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_mips.go | 2 +- .../x/net/ipv6/zsys_linux_mips64.go | 2 +- .../x/net/ipv6/zsys_linux_mips64le.go | 2 +- .../x/net/ipv6/zsys_linux_mipsle.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 2 +- .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 2 +- .../x/net/ipv6/zsys_linux_ppc64le.go | 2 +- .../x/net/ipv6/zsys_linux_riscv64.go | 173 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 2 +- .../golang.org/x/net/ipv6/zsys_netbsd.go | 2 +- .../golang.org/x/net/ipv6/zsys_openbsd.go | 2 +- .../golang.org/x/net/ipv6/zsys_solaris.go | 2 +- .../vendor/golang.org/x/net/proxy/dial.go | 54 + .../vendor/golang.org/x/net/proxy/direct.go | 15 +- .../vendor/golang.org/x/net/proxy/per_host.go | 
15 + .../vendor/golang.org/x/net/proxy/proxy.go | 24 +- .../vendor/golang.org/x/net/proxy/socks5.go | 10 +- .../golang.org/x/oauth2/google/google.go | 11 +- .../vendor/golang.org/x/oauth2/jwt/jwt.go | 21 +- .../vendor/golang.org/x/oauth2/oauth2.go | 2 +- .../vendor/golang.org/x/sys/unix/README.md | 16 +- .../golang.org/x/sys/unix/affinity_linux.go | 8 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 54 + .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 + .../vendor/golang.org/x/sys/unix/dirent.go | 91 +- .../golang.org/x/sys/unix/endian_little.go | 2 +- .../vendor/golang.org/x/sys/unix/mkall.sh | 35 +- .../vendor/golang.org/x/sys/unix/mkerrors.sh | 13 +- .../vendor/golang.org/x/sys/unix/mkpost.go | 20 +- .../vendor/golang.org/x/sys/unix/mksyscall.go | 7 +- .../x/sys/unix/mksyscall_aix_ppc.go | 13 +- .../x/sys/unix/mksyscall_aix_ppc64.go | 14 +- .../x/sys/unix/mksyscall_solaris.go | 335 + .../x/sys/unix/mksyscall_solaris.pl | 294 - .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 + .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 265 - .../vendor/golang.org/x/sys/unix/mksysnum.go | 2 +- .../{openbsd_pledge.go => pledge_openbsd.go} | 3 - .../x/sys/unix/readdirent_getdents.go | 12 + .../x/sys/unix/readdirent_getdirentries.go | 19 + .../golang.org/x/sys/unix/sockcmsg_unix.go | 15 +- .../vendor/golang.org/x/sys/unix/syscall.go | 1 - .../golang.org/x/sys/unix/syscall_aix.go | 48 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 16 + .../x/sys/unix/syscall_aix_ppc64.go | 47 + .../golang.org/x/sys/unix/syscall_bsd.go | 12 +- .../golang.org/x/sys/unix/syscall_darwin.go | 30 +- .../x/sys/unix/syscall_darwin_386.go | 2 + .../x/sys/unix/syscall_darwin_amd64.go | 2 + .../x/sys/unix/syscall_darwin_arm.go | 4 + .../x/sys/unix/syscall_darwin_arm64.go | 4 + .../x/sys/unix/syscall_dragonfly.go | 17 + .../golang.org/x/sys/unix/syscall_freebsd.go | 126 +- .../golang.org/x/sys/unix/syscall_linux.go | 167 +- .../x/sys/unix/syscall_linux_arm.go | 19 + .../x/sys/unix/syscall_linux_arm64.go | 13 + .../x/sys/unix/syscall_linux_riscv64.go | 13 + .../golang.org/x/sys/unix/syscall_netbsd.go | 37 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 54 +- .../x/sys/unix/syscall_openbsd_arm64.go | 37 + .../golang.org/x/sys/unix/syscall_solaris.go | 17 + .../golang.org/x/sys/unix/syscall_unix.go | 54 +- .../vendor/golang.org/x/sys/unix/types_aix.go | 27 +- .../golang.org/x/sys/unix/types_darwin.go | 6 + .../golang.org/x/sys/unix/types_freebsd.go | 50 +- .../golang.org/x/sys/unix/types_netbsd.go | 1 + .../golang.org/x/sys/unix/types_openbsd.go | 7 + .../{openbsd_unveil.go => unveil_openbsd.go} | 2 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 2 + .../x/sys/unix/zerrors_aix_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_386.go | 242 +- .../x/sys/unix/zerrors_linux_amd64.go | 242 +- .../x/sys/unix/zerrors_linux_arm.go | 242 +- .../x/sys/unix/zerrors_linux_arm64.go | 242 +- .../x/sys/unix/zerrors_linux_mips.go | 242 +- .../x/sys/unix/zerrors_linux_mips64.go | 242 +- .../x/sys/unix/zerrors_linux_mips64le.go | 242 +- .../x/sys/unix/zerrors_linux_mipsle.go | 242 +- .../x/sys/unix/zerrors_linux_ppc64.go | 242 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 242 +- .../x/sys/unix/zerrors_linux_riscv64.go | 242 +- .../x/sys/unix/zerrors_linux_s390x.go | 242 +- .../x/sys/unix/zerrors_linux_sparc64.go | 242 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 1789 +++ .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 52 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 48 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 54 +- 
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 44 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 20 +- .../x/sys/unix/zsyscall_darwin_386.go | 30 +- .../x/sys/unix/zsyscall_darwin_386.s | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 30 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 4 +- .../x/sys/unix/zsyscall_darwin_arm.go | 15 - .../x/sys/unix/zsyscall_darwin_arm.s | 2 - .../x/sys/unix/zsyscall_darwin_arm64.go | 15 - .../x/sys/unix/zsyscall_darwin_arm64.s | 2 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 17 + .../x/sys/unix/zsyscall_freebsd_386.go | 12 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 12 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 12 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 12 +- .../x/sys/unix/zsyscall_linux_386.go | 75 +- .../x/sys/unix/zsyscall_linux_amd64.go | 75 +- .../x/sys/unix/zsyscall_linux_arm.go | 100 +- .../x/sys/unix/zsyscall_linux_arm64.go | 90 +- .../x/sys/unix/zsyscall_linux_mips.go | 75 +- .../x/sys/unix/zsyscall_linux_mips64.go | 75 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 75 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 75 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 75 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 75 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 90 +- .../x/sys/unix/zsyscall_linux_s390x.go | 75 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 75 +- .../x/sys/unix/zsyscall_netbsd_386.go | 2 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_386.go | 2 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 1692 +++ .../x/sys/unix/zsyscall_solaris_amd64.go | 2 +- .../x/sys/unix/zsysctl_openbsd_386.go | 2 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 2 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 4 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 275 + .../x/sys/unix/zsysnum_freebsd_386.go | 23 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 445 +- .../x/sys/unix/zsysnum_linux_386.go | 806 +- .../x/sys/unix/zsysnum_linux_amd64.go | 10 + .../x/sys/unix/zsysnum_linux_arm.go | 742 +- .../x/sys/unix/zsysnum_linux_arm64.go | 11 + .../x/sys/unix/zsysnum_linux_mips.go | 776 +- .../x/sys/unix/zsysnum_linux_mips64.go | 10 + .../x/sys/unix/zsysnum_linux_mips64le.go | 10 + .../x/sys/unix/zsysnum_linux_mipsle.go | 776 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 21 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 21 + .../x/sys/unix/zsysnum_linux_riscv64.go | 11 + .../x/sys/unix/zsysnum_linux_s390x.go | 24 + .../x/sys/unix/zsysnum_linux_sparc64.go | 26 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 217 + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 45 +- .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 50 +- .../x/sys/unix/ztypes_darwin_386.go | 46 +- .../x/sys/unix/ztypes_darwin_amd64.go | 48 +- .../x/sys/unix/ztypes_darwin_arm.go | 46 +- .../x/sys/unix/ztypes_darwin_arm64.go | 48 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 38 +- .../x/sys/unix/ztypes_freebsd_386.go | 185 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 182 +- .../x/sys/unix/ztypes_freebsd_arm.go | 159 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 160 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 780 +- .../x/sys/unix/ztypes_linux_amd64.go | 781 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 780 +- .../x/sys/unix/ztypes_linux_arm64.go | 781 +- .../x/sys/unix/ztypes_linux_mips.go | 780 +- 
.../x/sys/unix/ztypes_linux_mips64.go | 781 +- .../x/sys/unix/ztypes_linux_mips64le.go | 781 +- .../x/sys/unix/ztypes_linux_mipsle.go | 780 +- .../x/sys/unix/ztypes_linux_ppc64.go | 781 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 781 +- .../x/sys/unix/ztypes_linux_riscv64.go | 782 +- .../x/sys/unix/ztypes_linux_s390x.go | 781 +- .../x/sys/unix/ztypes_linux_sparc64.go | 781 +- .../x/sys/unix/ztypes_netbsd_386.go | 35 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 41 +- .../x/sys/unix/ztypes_netbsd_arm.go | 41 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 41 +- .../x/sys/unix/ztypes_openbsd_386.go | 11 + .../x/sys/unix/ztypes_openbsd_amd64.go | 11 + .../x/sys/unix/ztypes_openbsd_arm.go | 11 + .../x/sys/unix/ztypes_openbsd_arm64.go | 565 + .../golang.org/x/sys/windows/dll_windows.go | 8 +- .../golang.org/x/sys/windows/env_windows.go | 34 +- .../golang.org/x/sys/windows/mkerrors.bash | 63 + .../x/sys/windows/mkknownfolderids.bash | 27 + .../golang.org/x/sys/windows/mksyscall.go | 2 + .../x/sys/windows/registry/mksyscall.go | 2 + .../x/sys/windows/registry/value.go | 9 +- .../x/sys/windows/security_windows.go | 402 +- .../golang.org/x/sys/windows/service.go | 74 +- .../golang.org/x/sys/windows/svc/service.go | 39 +- .../golang.org/x/sys/windows/svc/sys_386.s | 3 +- .../golang.org/x/sys/windows/svc/sys_amd64.s | 2 + .../x/sys/windows/syscall_windows.go | 116 +- .../golang.org/x/sys/windows/types_windows.go | 291 +- .../x/sys/windows/zerrors_windows.go | 6853 ++++++++++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 635 +- .../golang.org/x/text/transform/transform.go | 2 +- .../x/text/unicode/bidi/tables10.0.0.go | 2 +- .../x/text/unicode/bidi/tables11.0.0.go | 1887 +++ .../x/text/unicode/norm/tables10.0.0.go | 2 +- .../x/text/unicode/norm/tables11.0.0.go | 7693 +++++++++++ .../golang.org/x/text/width/kind_string.go | 12 + .../golang.org/x/text/width/tables10.0.0.go | 2 +- .../golang.org/x/text/width/tables11.0.0.go | 1330 ++ .../vendor/golang.org/x/time/rate/rate.go | 11 +- .../api/annotations/annotations.pb.go | 54 - .../googleapis/api/annotations/client.pb.go | 76 - .../api/annotations/field_behavior.pb.go | 119 - .../googleapis/api/annotations/http.pb.go | 745 - .../googleapis/api/annotations/resource.pb.go | 321 - .../grpc/internal/transport/http2_server.go | 5 +- .../grpc/internal/transport/http_util.go | 1 + .../vendor/google.golang.org/grpc/version.go | 2 +- .../vendor/gopkg.in/inf.v0/dec.go | 2 +- .../vendor/gopkg.in/yaml.v2/decode.go | 38 + .../vendor/gopkg.in/yaml.v2/resolve.go | 2 +- .../vendor/gopkg.in/yaml.v2/scannerc.go | 16 + .../v1beta1/generated.proto | 4 +- .../admissionregistration/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../vendor/k8s.io/api/autoscaling/v1/types.go | 4 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 4 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 46 +- .../vendor/k8s.io/api/core/v1/BUILD | 1 + .../vendor/k8s.io/api/core/v1/generated.pb.go | 1796 +-- .../vendor/k8s.io/api/core/v1/generated.proto | 30 +- .../vendor/k8s.io/api/core/v1/types.go | 54 +- .../core/v1/types_swagger_doc_generated.go | 28 +- .../k8s.io/api/core/v1/well_known_labels.go | 16 +- .../k8s.io/api/core/v1/well_known_taints.go | 55 + .../api/core/v1/zz_generated.deepcopy.go | 5 + .../k8s.io/api/discovery/v1alpha1/BUILD | 1 + .../api/discovery/v1alpha1/generated.pb.go | 161 +- .../api/discovery/v1alpha1/generated.proto | 35 +- 
.../k8s.io/api/discovery/v1alpha1/types.go | 46 +- .../v1alpha1/types_swagger_doc_generated.go | 13 +- .../discovery/v1alpha1/well_known_labels.go | 28 + .../v1alpha1/zz_generated.deepcopy.go | 10 +- .../vendor/k8s.io/api/discovery/v1beta1/BUILD | 39 + .../version => api/discovery/v1beta1}/doc.go | 9 +- .../api/discovery/v1beta1/generated.pb.go | 1730 +++ .../api/discovery/v1beta1/generated.proto | 157 + .../k8s.io/api/discovery/v1beta1/register.go | 56 + .../k8s.io/api/discovery/v1beta1/types.go | 162 + .../v1beta1/types_swagger_doc_generated.go | 86 + .../discovery/v1beta1/well_known_labels.go | 28 + .../v1beta1/zz_generated.deepcopy.go | 195 + .../k8s.io/api/extensions/v1beta1/types.go | 2 +- .../k8s.io/api/flowcontrol/v1alpha1/BUILD | 36 + .../k8s.io/api/flowcontrol/v1alpha1/doc.go | 24 + .../api/flowcontrol/v1alpha1/generated.pb.go | 5459 ++++++++ .../api/flowcontrol/v1alpha1/generated.proto | 436 + .../api/flowcontrol/v1alpha1/register.go | 58 + .../k8s.io/api/flowcontrol/v1alpha1/types.go | 513 + .../v1alpha1/types_swagger_doc_generated.go | 258 + .../v1alpha1/zz_generated.deepcopy.go | 541 + .../k8s.io/api/policy/v1beta1/generated.proto | 2 +- .../vendor/k8s.io/api/policy/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 14 +- .../vendor/k8s.io/api/rbac/v1alpha1/types.go | 14 +- .../v1alpha1/types_swagger_doc_generated.go | 16 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 12 +- .../vendor/k8s.io/api/rbac/v1beta1/types.go | 12 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../k8s.io/api/storage/v1/generated.pb.go | 1317 +- .../k8s.io/api/storage/v1/generated.proto | 84 + .../vendor/k8s.io/api/storage/v1/register.go | 3 + .../vendor/k8s.io/api/storage/v1/types.go | 96 +- .../storage/v1/types_swagger_doc_generated.go | 50 + .../api/storage/v1/zz_generated.deepcopy.go | 130 + .../api/storage/v1beta1/generated.proto | 2 + .../k8s.io/api/storage/v1beta1/types.go | 2 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../pkg/features/kube_features.go | 6 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 - .../apimachinery/pkg/api/errors/errors.go | 23 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 4 - .../apimachinery/pkg/api/resource/OWNERS | 2 - .../pkg/apis/meta/internalversion/BUILD | 27 +- .../pkg/apis/meta/internalversion/register.go | 17 +- .../apis/meta/internalversion/scheme/BUILD | 48 + .../apis/meta/internalversion/scheme/doc.go} | 11 +- .../meta/internalversion/scheme/register.go | 39 + .../apimachinery/pkg/apis/meta/v1/BUILD | 2 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/controller_ref.go | 19 +- .../pkg/apis/meta/v1/conversion.go | 101 +- .../apimachinery/pkg/apis/meta/v1/doc.go | 1 + .../pkg/apis/meta/v1/generated.proto | 15 +- .../apimachinery/pkg/apis/meta/v1/register.go | 85 +- .../apimachinery/pkg/apis/meta/v1/types.go | 23 +- .../meta/v1/types_swagger_doc_generated.go | 6 +- .../pkg/apis/meta/v1/unstructured/BUILD | 2 + .../pkg/apis/meta/v1/unstructured/helpers.go | 54 +- .../apis/meta/v1/zz_generated.conversion.go | 523 + .../apimachinery/pkg/apis/meta/v1beta1/BUILD | 1 + .../pkg/apis/meta/v1beta1/register.go | 14 +- .../apimachinery/pkg/labels/selector.go | 43 +- .../k8s.io/apimachinery/pkg/runtime/BUILD | 1 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 92 +- .../apimachinery/pkg/runtime/conversion.go | 97 +- .../apimachinery/pkg/runtime/interfaces.go | 66 + .../apimachinery/pkg/runtime/negotiate.go | 146 + .../apimachinery/pkg/runtime/register.go | 30 
- .../pkg/runtime/schema/group_version.go | 14 + .../pkg/runtime/serializer/json/BUILD | 4 + .../pkg/runtime/serializer/json/json.go | 48 +- .../pkg/runtime/serializer/protobuf/BUILD | 16 +- .../runtime/serializer/protobuf/protobuf.go | 59 +- .../pkg/runtime/serializer/versioning/BUILD | 2 + .../serializer/versioning/versioning.go | 82 +- .../pkg/runtime/serializer/yaml/BUILD | 31 - .../pkg/runtime/serializer/yaml/yaml.go | 46 - .../k8s.io/apimachinery/pkg/runtime/types.go | 13 - .../pkg/runtime/zz_generated.deepcopy.go | 33 - .../k8s.io/apimachinery/pkg/util/cache/BUILD | 10 +- .../apimachinery/pkg/util/cache/cache.go | 83 - .../apimachinery/pkg/util/cache/expiring.go | 192 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 39 + .../apimachinery/pkg/util/intstr/intstr.go | 2 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 28 +- .../pkg/util/naming/from_stack.go | 2 +- .../apimachinery/pkg/util/net/interface.go | 73 +- .../apimachinery/pkg/util/runtime/runtime.go | 11 +- .../pkg/util/strategicpatch/patch.go | 2 +- .../pkg/util/validation/field/errors.go | 2 +- .../pkg/util/validation/validation.go | 46 +- .../k8s.io/apiserver/pkg/admission/BUILD | 3 +- .../k8s.io/apiserver/pkg/admission/config.go | 6 +- .../pkg/admission/configuration/BUILD | 6 +- .../configuration/mutating_webhook_manager.go | 10 +- .../validating_webhook_manager.go | 10 +- .../apiserver/pkg/admission/initializer/BUILD | 1 + .../pkg/admission/initializer/initializer.go | 11 +- .../pkg/admission/initializer/interfaces.go | 12 + .../apiserver/pkg/admission/metrics/BUILD | 2 +- .../plugin/namespace/lifecycle/BUILD | 2 + .../plugin/namespace/lifecycle/admission.go | 11 +- .../pkg/admission/plugin/webhook/BUILD | 4 +- .../pkg/admission/plugin/webhook/accessors.go | 52 +- .../pkg/admission/plugin/webhook/config/BUILD | 9 +- .../config/apis/webhookadmission/BUILD | 1 + .../config/apis/webhookadmission/register.go | 4 +- .../config/apis/webhookadmission/v1/BUILD | 37 + .../config/apis/webhookadmission/v1/doc.go} | 17 +- .../apis/webhookadmission/v1/register.go | 50 + .../config/apis/webhookadmission/v1/types.go} | 25 +- .../v1/zz_generated.conversion.go | 67 + .../v1/zz_generated.deepcopy.go | 50 + .../v1/zz_generated.defaults.go | 32 + .../plugin/webhook/config/kubeconfig.go | 2 + .../admission/plugin/webhook/generic/BUILD | 4 +- .../plugin/webhook/generic/webhook.go | 4 +- .../admission/plugin/webhook/mutating/BUILD | 2 +- .../plugin/webhook/mutating/dispatcher.go | 12 +- .../admission/plugin/webhook/namespace/BUILD | 2 +- .../pkg/admission/plugin/webhook/object/BUILD | 2 +- .../admission/plugin/webhook/request/BUILD | 2 +- .../pkg/admission/plugin/webhook/rules/BUILD | 4 +- .../admission/plugin/webhook/rules/rules.go | 14 +- .../admission/plugin/webhook/validating/BUILD | 2 +- .../plugin/webhook/validating/dispatcher.go | 10 +- .../k8s.io/apiserver/pkg/apis/apiserver/BUILD | 1 + .../pkg/apis/apiserver/install/BUILD | 1 + .../pkg/apis/apiserver/install/install.go | 7 + .../apiserver/pkg/apis/apiserver/register.go | 20 +- .../pkg/apis/apiserver}/v1/BUILD | 10 +- .../apiserver/pkg/apis/apiserver/v1/doc.go | 23 + .../pkg/apis/apiserver}/v1/register.go | 25 +- .../apiserver/pkg/apis/apiserver/v1/types.go | 50 + .../apiserver/v1/zz_generated.conversion.go | 103 + .../apiserver/v1/zz_generated.deepcopy.go | 78 + .../apiserver/v1/zz_generated.defaults.go | 32 + .../k8s.io/apiserver/pkg/apis/audit/BUILD | 1 + .../k8s.io/apiserver/pkg/apis/audit/types.go | 23 +- .../apis/audit/v1/zz_generated.conversion.go | 12 +- 
.../audit/v1alpha1/zz_generated.conversion.go | 42 +- .../audit/v1beta1/zz_generated.conversion.go | 32 +- .../pkg/apis/audit/zz_generated.deepcopy.go | 63 +- .../vendor/k8s.io/apiserver/pkg/audit/BUILD | 3 +- .../k8s.io/apiserver/pkg/audit/request.go | 15 +- .../authentication/authenticatorfactory/BUILD | 3 +- .../authenticatorfactory/delegating.go | 27 +- .../authenticatorfactory/requestheader.go | 29 +- .../request/headerrequest/BUILD | 1 - .../request/headerrequest/requestheader.go | 91 +- .../pkg/authentication/request/x509/BUILD | 2 + .../request/x509/verify_options.go | 71 + .../pkg/authentication/request/x509/x509.go | 52 +- .../pkg/authentication/token/cache/BUILD | 3 +- .../token/cache/cache_simple.go | 14 +- .../token/cache/cached_token_authenticator.go | 89 +- .../authorization/authorizer/interfaces.go | 5 +- .../pkg/authorization/authorizerfactory/BUILD | 2 +- .../authorizerfactory/builtin.go | 7 +- .../authorizerfactory/delegating.go | 2 +- .../pkg/authorization/union/union.go | 5 +- .../k8s.io/apiserver/pkg/endpoints/BUILD | 1 + .../apiserver/pkg/endpoints/discovery/BUILD | 1 + .../apiserver/pkg/endpoints/discovery/util.go | 39 +- .../apiserver/pkg/endpoints/filters/BUILD | 2 +- .../pkg/endpoints/filters/authentication.go | 29 +- .../pkg/endpoints/filters/authorization.go | 2 +- .../pkg/endpoints/filters/impersonation.go | 2 +- .../apiserver/pkg/endpoints/handlers/BUILD | 8 + .../pkg/endpoints/handlers/create.go | 6 +- .../pkg/endpoints/handlers/delete.go | 13 +- .../pkg/endpoints/handlers/fieldmanager/BUILD | 25 +- .../handlers/fieldmanager/buildmanagerinfo.go | 72 + .../handlers/fieldmanager/capmanagers.go | 135 + .../handlers/fieldmanager/endpoints.yaml | 7018 ++++++++++ .../handlers/fieldmanager/fieldmanager.go | 317 +- .../handlers/fieldmanager/internal/BUILD | 1 - .../fieldmanager/internal/managedfields.go | 76 +- .../fieldmanager/internal/pathelement.go | 6 +- .../fieldmanager/internal/typeconverter.go | 22 - .../endpoints/handlers/fieldmanager/node.yaml | 259 + .../endpoints/handlers/fieldmanager/pod.yaml | 121 + .../handlers/fieldmanager/skipnonapplied.go | 66 + .../handlers/fieldmanager/stripmeta.go | 90 + .../handlers/fieldmanager/structuredmerge.go | 209 + .../apiserver/pkg/endpoints/handlers/get.go | 11 +- .../pkg/endpoints/handlers/helpers.go | 60 + .../handlers/negotiation/negotiate.go | 21 +- .../apiserver/pkg/endpoints/handlers/patch.go | 24 +- .../pkg/endpoints/handlers/response.go | 26 +- .../apiserver/pkg/endpoints/handlers/rest.go | 18 +- .../pkg/endpoints/handlers/update.go | 8 +- .../apiserver/pkg/endpoints/handlers/watch.go | 8 +- .../apiserver/pkg/endpoints/installer.go | 6 +- .../apiserver/pkg/endpoints/metrics/BUILD | 1 - .../pkg/endpoints/metrics/metrics.go | 58 +- .../apiserver/pkg/endpoints/request/BUILD | 1 + .../pkg/endpoints/request/requestinfo.go | 3 +- .../apiserver/pkg/features/kube_features.go | 7 +- .../apiserver/pkg/registry/generic/OWNERS | 4 - .../k8s.io/apiserver/pkg/registry/rest/OWNERS | 7 - .../vendor/k8s.io/apiserver/pkg/server/BUILD | 8 +- .../k8s.io/apiserver/pkg/server/config.go | 115 +- .../apiserver/pkg/server/config_selfclient.go | 34 +- .../pkg/server/dynamiccertificates/BUILD | 67 + .../server/dynamiccertificates/cert_key.go | 74 + .../server/dynamiccertificates/client_ca.go | 81 + .../configmap_cafile_content.go | 277 + .../dynamic_cafile_content.go | 254 + .../dynamic_serving_content.go | 179 + .../dynamic_sni_content.go | 50 + .../dynamiccertificates/named_certificates.go | 89 + 
.../dynamiccertificates/static_content.go | 171 + .../server/dynamiccertificates/tlsconfig.go | 263 + .../dynamiccertificates/union_content.go | 107 + .../pkg/server/dynamiccertificates/util.go | 68 + .../pkg/server/egressselector/config.go | 9 +- .../pkg/server/filters/maxinflight.go | 2 +- .../apiserver/pkg/server/filters/timeout.go | 23 +- .../apiserver/pkg/server/filters/wrap.go | 9 +- .../k8s.io/apiserver/pkg/server/hooks.go | 10 +- .../apiserver/pkg/server/routes/flags.go | 1 + .../apiserver/pkg/server/secure_serving.go | 190 +- .../pkg/server/storage/resource_config.go | 18 + .../pkg/server/storage/storage_factory.go | 4 +- .../vendor/k8s.io/apiserver/pkg/storage/BUILD | 2 + .../k8s.io/apiserver/pkg/storage/OWNERS | 3 - .../k8s.io/apiserver/pkg/storage/errors.go | 30 + .../k8s.io/apiserver/pkg/storage/etcd3/BUILD | 14 +- .../apiserver/pkg/storage/etcd3/compact.go | 2 +- .../apiserver/pkg/storage/etcd3/errors.go | 2 +- .../apiserver/pkg/storage/etcd3/event.go | 4 +- .../pkg/storage/etcd3/lease_manager.go | 2 +- .../apiserver/pkg/storage/etcd3/logger.go | 2 +- .../pkg/storage/etcd3/metrics/metrics.go | 7 +- .../apiserver/pkg/storage/etcd3/store.go | 42 +- .../apiserver/pkg/storage/etcd3/watcher.go | 2 +- .../pkg/storage/storagebackend/config.go | 6 +- .../pkg/storage/storagebackend/factory/BUILD | 8 +- .../storage/storagebackend/factory/etcd3.go | 16 +- .../k8s.io/apiserver/pkg/storage/value/BUILD | 6 +- .../apiserver/pkg/storage/value/metrics.go | 36 +- .../pkg/storage/value/transformer.go | 42 + .../apiserver/pkg/util/webhook/webhook.go | 50 +- .../pkg/authenticator/token/webhook/BUILD | 10 +- .../authenticator/token/webhook/webhook.go | 129 +- .../plugin/pkg/authorizer/webhook/BUILD | 9 +- .../plugin/pkg/authorizer/webhook/webhook.go | 168 +- .../vendor/k8s.io/client-go/dynamic/BUILD | 2 - .../vendor/k8s.io/client-go/dynamic/scheme.go | 78 +- .../vendor/k8s.io/client-go/dynamic/simple.go | 25 +- .../vendor/k8s.io/client-go/informers/BUILD | 4 + .../client-go/informers/discovery/BUILD | 2 + .../informers/discovery/interface.go | 8 + .../informers/discovery/v1beta1/BUILD | 36 + .../discovery/v1beta1/endpointslice.go | 89 + .../informers/discovery/v1beta1/interface.go | 45 + .../k8s.io/client-go/informers/factory.go | 6 + .../client-go/informers/flowcontrol/BUILD | 30 + .../informers/flowcontrol/interface.go | 46 + .../informers/flowcontrol/v1alpha1/BUILD | 37 + .../flowcontrol/v1alpha1/flowschema.go | 88 + .../flowcontrol/v1alpha1/interface.go | 52 + .../v1alpha1/prioritylevelconfiguration.go | 88 + .../k8s.io/client-go/informers/generic.go | 14 + .../client-go/informers/storage/v1/BUILD | 1 + .../client-go/informers/storage/v1/csinode.go | 88 + .../informers/storage/v1/interface.go | 7 + .../vendor/k8s.io/client-go/kubernetes/BUILD | 4 + .../k8s.io/client-go/kubernetes/clientset.go | 28 + .../k8s.io/client-go/kubernetes/fake/BUILD | 6 + .../kubernetes/fake/clientset_generated.go | 14 + .../client-go/kubernetes/fake/register.go | 4 + .../k8s.io/client-go/kubernetes/scheme/BUILD | 2 + .../client-go/kubernetes/scheme/register.go | 4 + .../v1/fake/fake_tokenreview_expansion.go | 6 + .../v1/tokenreview_expansion.go | 8 + .../fake/fake_tokenreview_expansion.go | 6 + .../v1beta1/tokenreview_expansion.go | 8 + ...fake_localsubjectaccessreview_expansion.go | 6 + .../fake_selfsubjectaccessreview_expansion.go | 6 + .../fake_selfsubjectrulesreview_expansion.go | 6 + .../fake_subjectaccessreview_expansion.go | 6 + .../v1/localsubjectaccessreview_expansion.go | 8 + 
.../v1/selfsubjectaccessreview_expansion.go | 8 + .../v1/selfsubjectrulesreview_expansion.go | 8 + .../v1/subjectaccessreview_expansion.go | 8 + ...fake_localsubjectaccessreview_expansion.go | 6 + .../fake_selfsubjectaccessreview_expansion.go | 6 + .../fake_selfsubjectrulesreview_expansion.go | 6 + .../fake_subjectaccessreview_expansion.go | 6 + .../localsubjectaccessreview_expansion.go | 8 + .../selfsubjectaccessreview_expansion.go | 8 + .../selfsubjectrulesreview_expansion.go | 8 + .../v1beta1/subjectaccessreview_expansion.go | 8 + .../typed/core/v1/event_expansion.go | 10 +- .../kubernetes/typed/discovery/v1beta1/BUILD | 39 + .../discovery/v1beta1/discovery_client.go | 89 + .../typed/discovery/v1beta1}/doc.go | 8 +- .../typed/discovery/v1beta1/endpointslice.go | 174 + .../typed/discovery/v1beta1/fake/BUILD | 38 + .../typed/discovery/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_discovery_client.go | 40 + .../v1beta1/fake/fake_endpointslice.go | 128 + .../discovery/v1beta1/generated_expansion.go | 21 + .../typed/flowcontrol/v1alpha1/BUILD | 40 + .../typed/flowcontrol/v1alpha1/doc.go | 20 + .../typed/flowcontrol/v1alpha1/fake/BUILD | 39 + .../typed/flowcontrol/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_flowcontrol_client.go | 44 + .../v1alpha1/fake/fake_flowschema.go | 131 + .../fake/fake_prioritylevelconfiguration.go | 131 + .../v1alpha1/flowcontrol_client.go | 94 + .../typed/flowcontrol/v1alpha1/flowschema.go | 180 + .../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/prioritylevelconfiguration.go | 180 + .../kubernetes/typed/storage/v1/BUILD | 1 + .../kubernetes/typed/storage/v1/csinode.go | 164 + .../kubernetes/typed/storage/v1/fake/BUILD | 1 + .../typed/storage/v1/fake/fake_csinode.go | 120 + .../storage/v1/fake/fake_storage_client.go | 4 + .../typed/storage/v1/generated_expansion.go | 2 + .../typed/storage/v1/storage_client.go | 5 + .../client-go/listers/discovery/v1beta1/BUILD | 32 + .../discovery/v1beta1/endpointslice.go | 94 + .../discovery/v1beta1/expansion_generated.go | 27 + .../listers/flowcontrol/v1alpha1/BUILD | 33 + .../v1alpha1/expansion_generated.go | 27 + .../flowcontrol/v1alpha1/flowschema.go | 65 + .../v1alpha1/prioritylevelconfiguration.go | 65 + .../k8s.io/client-go/listers/storage/v1/BUILD | 1 + .../client-go/listers/storage/v1/csinode.go | 65 + .../listers/storage/v1/expansion_generated.go | 4 + .../vendor/k8s.io/client-go/metadata/BUILD | 2 +- .../k8s.io/client-go/metadata/metadata.go | 4 +- .../v1beta1/zz_generated.conversion.go | 5 - .../k8s.io/client-go/pkg/version/def.bzl | 2 +- .../plugin/pkg/client/auth/exec/exec.go | 19 +- .../vendor/k8s.io/client-go/rest/OWNERS | 1 - .../vendor/k8s.io/client-go/rest/client.go | 141 +- .../vendor/k8s.io/client-go/rest/config.go | 69 +- .../vendor/k8s.io/client-go/rest/request.go | 235 +- .../scale/scheme/appsv1beta1/conversion.go | 14 - .../scale/scheme/appsv1beta1/register.go | 2 +- .../appsv1beta1/zz_generated.conversion.go | 10 - .../scale/scheme/appsv1beta2/conversion.go | 14 - .../scale/scheme/appsv1beta2/register.go | 2 +- .../appsv1beta2/zz_generated.conversion.go | 10 - .../scale/scheme/autoscalingv1/conversion.go | 15 - .../scale/scheme/autoscalingv1/register.go | 2 +- .../autoscalingv1/zz_generated.conversion.go | 10 - .../scheme/extensionsv1beta1/conversion.go | 14 - .../scheme/extensionsv1beta1/register.go | 2 +- .../zz_generated.conversion.go | 10 - .../vendor/k8s.io/client-go/testing/BUILD | 1 + .../k8s.io/client-go/testing/fixture.go | 27 +- .../vendor/k8s.io/client-go/tools/cache/BUILD | 5 + 
.../k8s.io/client-go/tools/cache/OWNERS | 6 - .../client-go/tools/cache/delta_fifo.go | 21 - .../k8s.io/client-go/tools/cache/index.go | 2 +- .../k8s.io/client-go/tools/cache/reflector.go | 140 +- .../tools/cache/reflector_metrics.go | 13 - .../client-go/tools/cache/shared_informer.go | 2 +- .../tools/cache/thread_safe_store.go | 7 + .../client-go/tools/clientcmd/api/types.go | 5 + .../client-go/tools/clientcmd/api/v1/BUILD | 1 + .../tools/clientcmd/api/v1/conversion.go | 334 +- .../client-go/tools/clientcmd/api/v1/doc.go | 1 + .../tools/clientcmd/api/v1/register.go | 2 +- .../client-go/tools/clientcmd/api/v1/types.go | 2 + .../api/v1/zz_generated.conversion.go | 424 + .../tools/clientcmd/client_config.go | 12 +- .../k8s.io/client-go/tools/events/BUILD | 1 + .../k8s.io/client-go/tools/events/doc.go | 19 + .../tools/events/event_broadcaster.go | 6 +- .../client-go/tools/events/interfaces.go | 6 + .../client-go/tools/leaderelection/OWNERS | 1 - .../tools/leaderelection/leaderelection.go | 13 +- .../tools/leaderelection/resourcelock/BUILD | 2 + .../resourcelock/configmaplock.go | 13 +- .../resourcelock/endpointslock.go | 16 +- .../leaderelection/resourcelock/interface.go | 63 +- .../leaderelection/resourcelock/leaselock.go | 37 +- .../leaderelection/resourcelock/multilock.go | 103 + .../k8s.io/client-go/tools/metrics/OWNERS | 2 - .../k8s.io/client-go/tools/pager/pager.go | 15 +- .../k8s.io/client-go/tools/record/doc.go | 3 +- .../k8s.io/client-go/tools/record/event.go | 46 +- .../vendor/k8s.io/client-go/util/cert/BUILD | 1 + .../vendor/k8s.io/client-go/util/cert/io.go | 17 +- .../vendor/k8s.io/client-go/util/cert/pem.go | 12 + .../client-go/util/cert/server_inspection.go | 102 + .../util/certificate/certificate_manager.go | 44 +- .../k8s.io/client-go/util/retry/util.go | 65 +- .../util/workqueue/delaying_queue.go | 18 +- .../vendor/k8s.io/cloud-provider/OWNERS | 4 - .../vendor/k8s.io/cloud-provider/go.mod | 13 +- .../vendor/k8s.io/cloud-provider/go.sum | 97 +- .../k8s.io/cloud-provider/node/helpers/BUILD | 4 + .../cloud-provider/node/helpers/conditions.go | 56 + .../cloud-provider/node/helpers/labels.go | 102 + .../cloud-provider/service/helpers/helper.go | 27 + .../config/v1alpha1/defaults.go | 1 + .../component-base/config/v1alpha1/types.go | 4 +- .../v1alpha1/zz_generated.conversion.go | 46 +- .../config/v1alpha1/zz_generated.deepcopy.go | 10 + .../component-base/config/validation/BUILD | 38 + .../config/validation/validation.go | 61 + .../k8s.io/component-base/featuregate/BUILD | 1 + .../featuregate/feature_gate.go | 41 +- .../k8s.io/component-base/metrics/BUILD | 51 +- .../k8s.io/component-base/metrics/OWNERS | 7 +- .../component-base/metrics/collector.go | 148 + .../k8s.io/component-base/metrics/counter.go | 25 +- .../k8s.io/component-base/metrics/desc.go | 173 + .../k8s.io/component-base/metrics/gauge.go | 49 +- .../component-base/metrics/histogram.go | 38 +- .../k8s.io/component-base/metrics/http.go | 63 + .../k8s.io/component-base/metrics/labels.go | 22 + .../metrics/legacyregistry/registry.go | 39 +- .../k8s.io/component-base/metrics/metric.go | 9 +- .../k8s.io/component-base/metrics/opts.go | 44 +- .../metrics/processstarttime.go | 6 +- .../metrics/prometheus/restclient}/BUILD | 8 +- .../metrics/prometheus/restclient/metrics.go} | 41 +- .../k8s.io/component-base/metrics/registry.go | 69 +- .../k8s.io/component-base/metrics/summary.go | 25 +- .../k8s.io/component-base/metrics/value.go | 60 + .../component-base/metrics/version_parser.go | 4 +- 
.../k8s.io/component-base/metrics/wrappers.go | 16 +- .../k8s.io/component-base/version/BUILD | 5 +- .../k8s.io/component-base/version/base.go | 2 +- .../k8s.io/component-base/version/def.bzl | 2 +- .../version/verflag/BUILD | 6 +- .../version/verflag/verflag.go | 2 +- .../vendor/k8s.io/csi-translation-lib/BUILD | 7 +- .../vendor/k8s.io/csi-translation-lib/go.mod | 9 +- .../vendor/k8s.io/csi-translation-lib/go.sum | 77 +- .../k8s.io/csi-translation-lib/plugins/BUILD | 1 + .../csi-translation-lib/plugins/aws_ebs.go | 56 +- .../csi-translation-lib/plugins/azure_disk.go | 21 +- .../csi-translation-lib/plugins/azure_file.go | 11 +- .../csi-translation-lib/plugins/gce_pd.go | 122 +- .../plugins/in_tree_volume.go | 140 +- .../plugins/openstack_cinder.go | 17 +- .../k8s.io/csi-translation-lib/translate.go | 58 +- cluster-autoscaler/vendor/k8s.io/klog/klog.go | 34 +- .../vendor/k8s.io/kube-scheduler/LICENSE | 202 + .../k8s.io/kube-scheduler/config/v1/BUILD | 33 + .../api => kube-scheduler/config/v1}/doc.go | 5 +- .../config/v1}/register.go | 28 +- .../api => kube-scheduler/config}/v1/types.go | 147 +- .../config/v1}/zz_generated.deepcopy.go | 335 +- .../kube-scheduler/config/v1alpha1/BUILD | 34 + .../kube-scheduler/config/v1alpha1/doc.go | 21 + .../config/v1alpha1/register.go | 43 + .../kube-scheduler/config/v1alpha1/types.go | 223 + .../config/v1alpha1/zz_generated.deepcopy.go | 351 + .../k8s.io/kubelet/config/v1beta1/types.go | 4 + .../pkg}/apis/deviceplugin/v1beta1/BUILD | 3 +- .../pkg}/apis/deviceplugin/v1beta1/api.pb.go | 2 +- .../pkg}/apis/deviceplugin/v1beta1/api.proto | 2 +- .../apis/deviceplugin/v1beta1/constants.go | 0 .../pkg}/apis/pluginregistration/v1/BUILD | 3 +- .../pkg}/apis/pluginregistration/v1/api.pb.go | 0 .../pkg}/apis/pluginregistration/v1/api.proto | 0 .../apis/pluginregistration/v1/constants.go | 0 .../kubernetes/cmd/kube-proxy/app/BUILD | 30 +- .../cmd/kube-proxy/app/conntrack.go | 2 +- .../kubernetes/cmd/kube-proxy/app/server.go | 72 +- .../cmd/kube-proxy/app/server_others.go | 26 +- .../cmd/kube-proxy/app/server_windows.go | 8 +- .../k8s.io/kubernetes/cmd/kubelet/app/BUILD | 23 +- .../k8s.io/kubernetes/cmd/kubelet/app/auth.go | 24 +- .../kubernetes/cmd/kubelet/app/options/BUILD | 11 +- .../cmd/kubelet/app/options/globalflags.go | 2 +- .../cmd/kubelet/app/options/options.go | 4 +- .../kubernetes/cmd/kubelet/app/plugins.go | 12 +- .../cmd/kubelet/app/plugins_providerless.go | 10 +- .../cmd/kubelet/app/plugins_providers.go | 54 +- .../kubernetes/cmd/kubelet/app/server.go | 98 +- .../k8s.io/kubernetes/pkg/api/testapi/OWNERS | 1 - .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 13 +- .../kubernetes/pkg/apis/admission/v1/BUILD | 1 + .../admission/v1/zz_generated.conversion.go | 7 +- .../pkg/apis/admission/v1beta1/BUILD | 1 + .../v1beta1/zz_generated.conversion.go | 7 +- .../pkg/apis/admissionregistration/types.go | 4 +- .../k8s.io/kubernetes/pkg/apis/apps/types.go | 5 +- .../kubernetes/pkg/apis/apps/v1/conversion.go | 308 - .../kubernetes/pkg/apis/apps/v1/register.go | 2 +- .../apis/apps/v1/zz_generated.conversion.go | 224 +- .../pkg/apis/apps/v1beta1/conversion.go | 246 +- .../apps/v1beta1/zz_generated.conversion.go | 126 +- .../pkg/apis/apps/v1beta2/conversion.go | 310 +- .../apps/v1beta2/zz_generated.conversion.go | 234 +- .../pkg/apis/authentication/v1/conversion.go | 15 +- .../pkg/apis/authentication/v1/register.go | 2 +- .../v1/zz_generated.conversion.go | 18 +- .../pkg/apis/authentication/v1beta1/BUILD | 1 - .../apis/authentication/v1beta1/register.go | 2 +- 
.../pkg/apis/authorization/v1/BUILD | 1 - .../pkg/apis/authorization/v1/register.go | 7 +- .../pkg/apis/authorization/v1beta1/BUILD | 1 - .../apis/authorization/v1beta1/register.go | 2 +- .../kubernetes/pkg/apis/autoscaling/types.go | 6 +- .../pkg/apis/autoscaling/v1/conversion.go | 34 - .../pkg/apis/autoscaling/v1/register.go | 2 +- .../autoscaling/v1/zz_generated.conversion.go | 110 - .../apis/autoscaling/v2beta1/conversion.go | 32 - .../pkg/apis/autoscaling/v2beta1/register.go | 2 +- .../v2beta1/zz_generated.conversion.go | 80 - .../pkg/apis/autoscaling/v2beta2/BUILD | 4 + .../pkg/apis/autoscaling/v2beta2/doc.go | 1 + .../v2beta2/zz_generated.conversion.go | 737 + .../pkg/apis/batch/v1/conversion.go | 9 - .../apis/batch/v1/zz_generated.conversion.go | 10 - .../kubernetes/pkg/apis/certificates/types.go | 46 +- .../k8s.io/kubernetes/pkg/apis/core/OWNERS | 1 - .../pkg/apis/core/annotation_key_constants.go | 6 +- .../k8s.io/kubernetes/pkg/apis/core/doc.go | 2 +- .../pkg/apis/core/helper/helpers.go | 28 - .../kubernetes/pkg/apis/core/install/OWNERS | 1 - .../k8s.io/kubernetes/pkg/apis/core/json.go | 5 +- .../pkg/apis/core/objectreference.go | 3 + .../kubernetes/pkg/apis/core/pods/helpers.go | 3 +- .../kubernetes/pkg/apis/core/register.go | 6 +- .../kubernetes/pkg/apis/core/resource.go | 26 +- .../k8s.io/kubernetes/pkg/apis/core/taint.go | 2 +- .../k8s.io/kubernetes/pkg/apis/core/types.go | 260 +- .../k8s.io/kubernetes/pkg/apis/core/v1/OWNERS | 1 - .../kubernetes/pkg/apis/core/v1/conversion.go | 30 +- .../pkg/apis/core/v1/helper/helpers.go | 12 +- .../apis/core/v1/zz_generated.conversion.go | 362 +- .../pkg/apis/core/validation/OWNERS | 2 - .../pkg/apis/core/validation/validation.go | 48 +- .../pkg/apis/core/zz_generated.deepcopy.go | 5 + .../kubernetes/pkg/apis/discovery/BUILD | 1 + .../pkg/apis/discovery/install/BUILD | 1 + .../pkg/apis/discovery/install/install.go | 4 +- .../kubernetes/pkg/apis/discovery/types.go | 46 +- .../pkg/apis/discovery/v1alpha1/defaults.go | 11 +- .../v1alpha1/zz_generated.conversion.go | 6 +- .../v1alpha1/zz_generated.defaults.go | 1 - .../pkg/apis/discovery/v1beta1/BUILD | 52 + .../pkg/apis/discovery/v1beta1/defaults.go | 42 + .../pkg/apis/discovery/v1beta1/doc.go | 22 + .../pkg/apis/discovery/v1beta1/register.go | 45 + .../v1beta1/zz_generated.conversion.go | 218 + .../v1beta1/zz_generated.defaults.go | 49 + .../apis/discovery/zz_generated.deepcopy.go | 10 +- .../events/v1beta1/zz_generated.conversion.go | 10 - .../kubernetes/pkg/apis/extensions/OWNERS | 2 - .../pkg/apis/extensions/register.go | 1 + .../kubernetes/pkg/apis/extensions/types.go | 2 +- .../pkg/apis/extensions/v1beta1/conversion.go | 40 - .../pkg/apis/extensions/v1beta1/register.go | 2 +- .../v1beta1/zz_generated.conversion.go | 164 +- .../pkg/apis/node/v1alpha1/conversion.go | 8 - .../pkg/apis/node/v1alpha1/register.go | 2 +- .../node/v1alpha1/zz_generated.conversion.go | 10 - .../kubernetes/pkg/apis/policy/register.go | 4 +- .../kubernetes/pkg/apis/policy/types.go | 5 +- .../pkg/apis/policy/validation/validation.go | 15 + .../rbac/v1alpha1/zz_generated.conversion.go | 10 - .../kubernetes/pkg/apis/scheduling/BUILD | 14 +- .../kubernetes/pkg/apis/scheduling/v1/BUILD | 9 +- .../pkg/apis/scheduling/{ => v1}/helpers.go | 34 +- .../pkg/apis/scheduling/v1alpha1/defaults.go | 2 +- .../pkg/apis/scheduling/v1alpha1/register.go | 2 +- .../pkg/apis/scheduling/v1beta1/defaults.go | 2 +- .../pkg/apis/scheduling/v1beta1/register.go | 2 +- .../kubernetes/pkg/apis/storage/types.go | 2 +- 
.../storage/v1/zz_generated.conversion.go | 164 + .../providers/.import-restrictions | 12 +- .../pkg/controller/.import-restrictions | 27 +- .../pkg/controller/controller_ref_manager.go | 120 +- .../pkg/controller/controller_utils.go | 125 +- .../volume/persistentvolume/util/BUILD | 1 + .../volume/persistentvolume/util/util.go | 34 +- .../pkg/controller/volume/scheduling/BUILD | 20 +- .../volume/scheduling/metrics}/BUILD | 10 +- .../metrics.go} | 6 +- .../scheduling/scheduler_assume_cache.go | 7 +- .../volume/scheduling/scheduler_binder.go | 148 +- .../scheduling/scheduler_binder_cache.go | 7 +- .../kubernetes/pkg/credentialprovider/OWNERS | 2 - .../pkg/credentialprovider/aws/BUILD | 2 +- .../pkg/credentialprovider/aws/OWNERS | 2 - .../credentialprovider/aws/aws_credentials.go | 2 +- .../azure/azure_acr_helper.go | 11 +- .../pkg/credentialprovider/config.go | 15 +- .../pkg/credentialprovider/gcp/doc.go | 4 +- .../pkg/credentialprovider/gcp/metadata.go | 30 +- .../kubernetes/pkg/features/kube_features.go | 227 +- .../k8s.io/kubernetes/pkg/kubelet/BUILD | 29 +- .../k8s.io/kubernetes/pkg/kubelet/apis/BUILD | 5 - .../pkg/kubelet/apis/config/scheme/scheme.go | 7 +- .../pkg/kubelet/apis/config/types.go | 4 + .../kubelet/apis/config/v1beta1/defaults.go | 2 +- .../config/v1beta1/zz_generated.conversion.go | 2 + .../pkg/kubelet/apis/config/validation/BUILD | 1 + .../apis/config/validation/validation.go | 10 + .../apis/resourcemetrics/v1alpha1/BUILD | 1 + .../apis/resourcemetrics/v1alpha1/config.go | 111 +- .../pkg/kubelet/apis/well_known_labels.go | 13 +- .../kubernetes/pkg/kubelet/cadvisor/BUILD | 22 + .../pkg/kubelet/cadvisor/cadvisor_linux.go | 3 +- .../kubelet/cadvisor/cadvisor_unsupported.go | 2 +- .../pkg/kubelet/cadvisor/helpers_linux.go | 2 +- .../kubelet/cadvisor/helpers_unsupported.go | 2 +- .../certificate/bootstrap/bootstrap.go | 2 +- .../pkg/kubelet/certificate/kubelet.go | 32 +- .../k8s.io/kubernetes/pkg/kubelet/cm/BUILD | 67 +- .../pkg/kubelet/cm/cgroup_manager_linux.go | 2 +- .../pkg/kubelet/cm/container_manager.go | 2 + .../pkg/kubelet/cm/container_manager_linux.go | 11 +- .../cm/container_manager_unsupported.go | 5 +- .../kubelet/cm/container_manager_windows.go | 5 +- .../pkg/kubelet/cm/cpumanager/BUILD | 6 +- .../pkg/kubelet/cm/cpumanager/OWNERS | 2 + .../pkg/kubelet/cm/cpumanager/cpu_manager.go | 109 +- .../kubelet/cm/cpumanager/fake_cpu_manager.go | 3 +- .../pkg/kubelet/cm/cpumanager/policy.go | 5 + .../pkg/kubelet/cm/cpumanager/policy_none.go | 5 + .../kubelet/cm/cpumanager/policy_static.go | 140 +- .../pkg/kubelet/cm/cpumanager/state/state.go | 2 +- .../cm/cpumanager/topology/topology.go | 2 +- .../kubelet/cm/cpumanager/topology_hints.go | 131 - .../pkg/kubelet/cm/devicemanager/BUILD | 14 +- .../pkg/kubelet/cm/devicemanager/OWNERS | 1 + .../cm/devicemanager/device_plugin_stub.go | 4 +- .../pkg/kubelet/cm/devicemanager/endpoint.go | 2 +- .../pkg/kubelet/cm/devicemanager/manager.go | 30 +- .../kubelet/cm/devicemanager/pod_devices.go | 2 +- .../cm/devicemanager/topology_hints.go | 31 +- .../kubelet/cm/pod_container_manager_linux.go | 16 +- .../pkg/kubelet/cm/topologymanager/BUILD | 7 +- .../pkg/kubelet/cm/topologymanager/OWNERS | 10 + .../{socketmask => bitmask}/BUILD | 6 +- .../cm/topologymanager/bitmask/bitmask.go | 194 + .../pkg/kubelet/cm/topologymanager/policy.go | 9 +- .../cm/topologymanager/policy_best_effort.go | 164 +- .../kubelet/cm/topologymanager/policy_none.go | 6 +- .../cm/topologymanager/policy_restricted.go | 16 +- .../policy_single_numa_node.go | 19 +- 
.../topologymanager/socketmask/socketmask.go | 194 - .../cm/topologymanager/topology_manager.go | 237 +- .../kubernetes/pkg/kubelet/cm/util/BUILD | 4 + .../kubernetes/pkg/kubelet/config/BUILD | 7 + .../kubernetes/pkg/kubelet/container/BUILD | 1 + .../pkg/kubelet/container/helpers.go | 25 +- .../pkg/kubelet/container/runtime.go | 3 + .../kubelet/container/testing/fake_runtime.go | 4 + .../container/testing/fake_runtime_helper.go | 2 +- .../kubelet/container/testing/runtime_mock.go | 5 + .../kubernetes/pkg/kubelet/dockershim/BUILD | 3 + .../pkg/kubelet/dockershim/cm/BUILD | 10 + .../pkg/kubelet/dockershim/docker_service.go | 4 +- .../kubelet/dockershim/docker_streaming.go | 6 +- .../dockershim/docker_streaming_windows.go | 2 +- .../kubelet/dockershim/helpers_unsupported.go | 2 +- .../pkg/kubelet/dockershim/helpers_windows.go | 2 +- .../dockershim/libdocker/fake_client.go | 1 + .../pkg/kubelet/dockershim/metrics/BUILD | 1 - .../pkg/kubelet/dockershim/metrics/metrics.go | 4 +- .../pkg/kubelet/dockershim/network/OWNERS | 3 +- .../pkg/kubelet/dockershim/network/cni/BUILD | 17 + .../pkg/kubelet/dockershim/network/cni/cni.go | 36 +- .../dockershim/network/cni/cni_others.go | 4 + .../dockershim/network/cni/cni_windows.go | 11 +- .../network/hostport/fake_iptables.go | 10 +- .../dockershim/network/hostport/hostport.go | 8 +- .../network/hostport/hostport_manager.go | 15 +- .../network/hostport/hostport_syncer.go | 24 +- .../kubelet/dockershim/network/kubenet/BUILD | 42 +- .../network/kubenet/kubenet_linux.go | 125 +- .../kubelet/dockershim/network/metrics/BUILD | 1 - .../dockershim/network/metrics/metrics.go | 4 +- .../pkg/kubelet/dockershim/network/network.go | 3 + .../pkg/kubelet/dockershim/network/plugins.go | 20 +- .../kubernetes/pkg/kubelet/eviction/BUILD | 6 +- .../pkg/kubelet/eviction/eviction_manager.go | 36 +- .../pkg/kubelet/eviction/helpers.go | 10 +- .../eviction/threshold_notifier_linux.go | 2 +- .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 92 +- .../kubernetes/pkg/kubelet/kubelet_getters.go | 15 +- .../pkg/kubelet/kubelet_network_linux.go | 9 + .../pkg/kubelet/kubelet_network_others.go | 2 +- .../pkg/kubelet/kubelet_node_status.go | 54 +- .../pkg/kubelet/kubelet_node_status_others.go | 23 + .../kubelet_node_status_windows.go} | 19 +- .../kubernetes/pkg/kubelet/kubelet_pods.go | 60 +- .../kubelet/kubeletconfig/checkpoint/BUILD | 1 + .../kubeletconfig/checkpoint/download.go | 6 +- .../kubelet/kubeletconfig/configfiles/BUILD | 2 + .../kubeletconfig/configfiles/configfiles.go | 2 +- .../kubelet/kubeletconfig/util/codec/BUILD | 3 + .../kubelet/kubeletconfig/util/codec/codec.go | 57 +- .../kubernetes/pkg/kubelet/kuberuntime/BUILD | 5 + .../kuberuntime/fake_kuberuntime_manager.go | 1 + .../pkg/kubelet/kuberuntime/helpers.go | 4 +- .../kuberuntime/kuberuntime_container.go | 32 +- .../kuberuntime/kuberuntime_manager.go | 38 +- .../pkg/kubelet/logs/container_log_manager.go | 2 +- .../kubernetes/pkg/kubelet/metrics/BUILD | 1 - .../pkg/kubelet/metrics/collectors/BUILD | 5 +- .../kubelet/metrics/collectors/log_metrics.go | 28 +- .../metrics/collectors/volume_stats.go | 62 +- .../kubernetes/pkg/kubelet/metrics/metrics.go | 51 +- .../pkg/kubelet/mountpod/mount_pod.go | 120 - .../kubernetes/pkg/kubelet/network/dns/dns.go | 6 +- .../pkg/kubelet/nodelease/controller.go | 13 +- .../kubernetes/pkg/kubelet/nodestatus/BUILD | 4 +- .../pkg/kubelet/nodestatus/setters.go | 6 +- .../k8s.io/kubernetes/pkg/kubelet/oom/BUILD | 12 + .../k8s.io/kubernetes/pkg/kubelet/pleg/BUILD | 2 +- 
.../pkg/kubelet/pluginmanager/BUILD | 2 +- .../cache/actual_state_of_world.go | 5 +- .../cache/desired_state_of_world.go | 13 +- .../pkg/kubelet/pluginmanager/cache/types.go | 2 +- .../pkg/kubelet/pluginmanager/metrics/BUILD | 4 +- .../kubelet/pluginmanager/metrics/metrics.go | 30 +- .../pluginmanager/operationexecutor/BUILD | 3 +- .../operationexecutor/operation_executor.go | 7 +- .../operationexecutor/operation_generator.go | 18 +- .../kubelet/pluginmanager/plugin_manager.go | 2 - .../kubelet/pluginmanager/pluginwatcher/BUILD | 4 +- .../pluginwatcher/example_handler.go | 9 +- .../pluginwatcher/example_plugin.go | 7 +- .../pluginwatcher/plugin_watcher.go | 50 +- .../kubelet/pluginmanager/reconciler/BUILD | 2 +- .../pluginmanager/reconciler/reconciler.go | 2 +- .../k8s.io/kubernetes/pkg/kubelet/pod/BUILD | 4 + .../pkg/kubelet/pod/mirror_client.go | 60 +- .../kubernetes/pkg/kubelet/pod/pod_manager.go | 6 +- .../kubernetes/pkg/kubelet/preemption/BUILD | 1 + .../pkg/kubelet/preemption/preemption.go | 6 + .../kubernetes/pkg/kubelet/prober/BUILD | 2 +- .../kubernetes/pkg/kubelet/prober/prober.go | 45 +- .../pkg/kubelet/prober/prober_manager.go | 2 +- .../kubelet/prober/results/results_manager.go | 13 +- .../kubernetes/pkg/kubelet/prober/worker.go | 16 +- .../k8s.io/kubernetes/pkg/kubelet/qos/doc.go | 2 +- .../kubernetes/pkg/kubelet/qos/policy.go | 11 +- .../kubernetes/pkg/kubelet/reason_cache.go | 10 +- .../kubernetes/pkg/kubelet/server/BUILD | 3 +- .../pkg/kubelet/server/metrics/BUILD | 1 - .../pkg/kubelet/server/metrics/metrics.go | 4 +- .../kubernetes/pkg/kubelet/server/server.go | 22 +- .../kubernetes/pkg/kubelet/server/stats/BUILD | 10 +- .../stats/prometheus_resource_metrics.go | 72 +- .../pkg/kubelet/stats/cri_stats_provider.go | 5 +- .../pkg/kubelet/stats/pidlimit/BUILD | 4 + .../kubernetes/pkg/kubelet/status/generate.go | 11 +- .../pkg/kubelet/status/status_manager.go | 7 +- .../k8s.io/kubernetes/pkg/kubelet/token/BUILD | 1 + .../pkg/kubelet/token/token_manager.go | 26 +- .../pkg/kubelet/types/pod_update.go | 21 +- .../k8s.io/kubernetes/pkg/kubelet/util/BUILD | 16 + .../kubernetes/pkg/kubelet/util/ioutils/BUILD | 12 +- .../pkg/kubelet/util/ioutils/ioutils.go | 33 + .../kubernetes/pkg/kubelet/volume_host.go | 80 +- .../pkg/kubelet/volumemanager/BUILD | 6 +- .../cache/actual_state_of_world.go | 28 +- .../cache/desired_state_of_world.go | 8 +- .../pkg/kubelet/volumemanager/metrics/BUILD | 3 +- .../kubelet/volumemanager/metrics/metrics.go | 30 +- .../pkg/kubelet/volumemanager/populator/BUILD | 3 + .../desired_state_of_world_populator.go | 73 +- .../kubelet/volumemanager/reconciler/BUILD | 4 +- .../volumemanager/reconciler/reconciler.go | 58 +- .../kubelet/volumemanager/volume_manager.go | 22 +- .../kubernetes/pkg/kubelet/winstats/BUILD | 1 + .../kubelet/winstats/perfcounter_nodestats.go | 11 +- .../pkg/kubelet/winstats/perfcounters.go | 5 - .../pkg/kubelet/winstats/version.go | 101 +- .../k8s.io/kubernetes/pkg/kubemark/BUILD | 2 +- .../k8s.io/kubernetes/pkg/kubemark/OWNERS | 2 + .../kubernetes/pkg/kubemark/hollow_kubelet.go | 38 +- .../kubernetes/pkg/kubemark/hollow_proxy.go | 9 +- .../k8s.io/kubernetes/pkg/probe/exec/BUILD | 1 + .../k8s.io/kubernetes/pkg/probe/exec/exec.go | 21 +- .../k8s.io/kubernetes/pkg/probe/http/BUILD | 2 +- .../k8s.io/kubernetes/pkg/probe/http/http.go | 8 +- .../vendor/k8s.io/kubernetes/pkg/proxy/BUILD | 6 +- .../pkg/proxy/apis/config/scheme/scheme.go | 2 +- .../kubernetes/pkg/proxy/apis/config/types.go | 2 +- .../k8s.io/kubernetes/pkg/proxy/config/BUILD | 4 +- 
.../kubernetes/pkg/proxy/config/config.go | 129 +- .../k8s.io/kubernetes/pkg/proxy/endpoints.go | 134 +- .../pkg/proxy/endpointslicecache.go | 261 +- .../kubernetes/pkg/proxy/healthcheck/BUILD | 4 +- .../pkg/proxy/healthcheck/common.go | 63 + .../pkg/proxy/healthcheck/proxier_health.go | 163 + .../{healthcheck.go => service_health.go} | 178 +- .../kubernetes/pkg/proxy/iptables/BUILD | 5 +- .../kubernetes/pkg/proxy/iptables/proxier.go | 145 +- .../k8s.io/kubernetes/pkg/proxy/ipvs/BUILD | 11 +- .../kubernetes/pkg/proxy/ipvs/README.md | 2 +- .../k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go | 13 +- .../kubernetes/pkg/proxy/ipvs/meta_proxier.go | 38 +- .../kubernetes/pkg/proxy/ipvs/proxier.go | 213 +- .../k8s.io/kubernetes/pkg/proxy/metrics/BUILD | 1 - .../kubernetes/pkg/proxy/metrics/metrics.go | 23 +- .../k8s.io/kubernetes/pkg/proxy/service.go | 10 +- .../k8s.io/kubernetes/pkg/proxy/topology.go | 80 + .../k8s.io/kubernetes/pkg/proxy/types.go | 8 +- .../kubernetes/pkg/proxy/userspace/BUILD | 7 +- .../kubernetes/pkg/proxy/userspace/proxier.go | 11 +- .../pkg/proxy/userspace/roundrobin.go | 23 +- .../kubernetes/pkg/proxy/winkernel/BUILD | 4 +- .../kubernetes/pkg/proxy/winkernel/metrics.go | 15 +- .../kubernetes/pkg/proxy/winkernel/proxier.go | 75 +- .../kubernetes/pkg/proxy/winuserspace/BUILD | 2 +- .../pkg/proxy/winuserspace/proxier.go | 4 +- .../pkg/proxy/winuserspace/proxysocket.go | 6 +- .../kubernetes/pkg/quota/v1/resources.go | 19 - .../k8s.io/kubernetes/pkg/scheduler/BUILD | 54 +- .../kubernetes/pkg/scheduler/algorithm/BUILD | 7 +- .../pkg/scheduler/algorithm/predicates/BUILD | 11 +- .../predicates/csi_volume_predicate.go | 73 +- .../scheduler/algorithm/predicates/error.go | 32 + .../algorithm/predicates/metadata.go | 594 +- .../algorithm/predicates/predicates.go | 484 +- .../algorithm/predicates/testing_helper.go | 95 - .../scheduler/algorithm/predicates/utils.go | 8 +- .../pkg/scheduler/algorithm/priorities/BUILD | 16 +- .../balanced_resource_allocation.go | 6 +- .../algorithm/priorities/even_pods_spread.go | 288 +- .../algorithm/priorities/image_locality.go | 16 +- .../algorithm/priorities/interpod_affinity.go | 206 +- .../algorithm/priorities/least_requested.go | 6 +- .../algorithm/priorities/metadata.go | 103 +- .../algorithm/priorities/most_requested.go | 6 +- .../algorithm/priorities/node_affinity.go | 18 +- .../algorithm/priorities/node_label.go | 39 +- .../priorities/node_prefer_avoid_pods.go | 16 +- .../scheduler/algorithm/priorities/reduce.go | 12 +- .../priorities/requested_to_capacity_ratio.go | 15 +- .../priorities/resource_allocation.go | 14 +- .../algorithm/priorities/resource_limits.go | 16 +- .../priorities/selector_spreading.go | 209 +- .../algorithm/priorities/taint_toleration.go | 16 +- .../algorithm/priorities/test_util.go | 47 +- .../scheduler/algorithm/priorities/types.go | 27 +- .../algorithm/scheduler_interface.go | 12 +- .../pkg/scheduler/algorithm/types.go | 97 +- .../plugins.go => algorithm_factory.go} | 312 +- .../pkg/scheduler/algorithmprovider/BUILD | 7 +- .../algorithmprovider/defaults/BUILD | 3 +- .../algorithmprovider/defaults/defaults.go | 60 +- .../defaults/register_predicates.go | 109 +- .../defaults/register_priorities.go | 64 +- .../k8s.io/kubernetes/pkg/scheduler/api/BUILD | 24 +- .../kubernetes/pkg/scheduler/api/latest/BUILD | 34 - .../pkg/scheduler/api/latest/latest.go | 56 - .../scheduler/api/v1/zz_generated.deepcopy.go | 690 - .../scheduler/api/validation/validation.go | 82 - .../pkg/scheduler/api/well_known_labels.go | 44 - 
.../pkg/scheduler/apis/config/BUILD | 3 + .../types.go => apis/config/legacy_types.go} | 155 +- .../pkg/scheduler/apis/config/register.go | 2 + .../pkg/scheduler/apis/config/scheme/BUILD | 30 + .../scheduler/apis/config/scheme/scheme.go | 46 + .../pkg/scheduler/apis/config/types.go | 67 +- .../pkg/scheduler/apis/config/v1/BUILD | 35 + .../pkg/scheduler/apis/config/v1/doc.go | 24 + .../pkg/scheduler/apis/config/v1/register.go | 43 + .../apis/config/v1/zz_generated.conversion.go | 559 + .../apis/config/v1/zz_generated.deepcopy.go | 21 + .../apis/config/v1/zz_generated.defaults.go | 32 + .../pkg/scheduler/apis/config/v1alpha1/BUILD | 59 + .../apis/config/v1alpha1/conversion.go | 68 + .../apis/config/v1alpha1/defaults.go | 171 + .../pkg/scheduler/apis/config/v1alpha1/doc.go | 24 + .../apis/config/v1alpha1/register.go | 43 + .../v1alpha1/zz_generated.conversion.go | 686 + .../config/v1alpha1/zz_generated.deepcopy.go | 21 + .../config/v1alpha1/zz_generated.defaults.go | 40 + .../{api => apis/config}/validation/BUILD | 22 +- .../apis/config/validation/validation.go | 169 + .../apis/config/zz_generated.deepcopy.go | 355 +- .../pkg/scheduler/apis/extender/v1/BUILD | 30 + .../{api => apis/extender}/v1/doc.go | 4 +- .../pkg/scheduler/apis/extender/v1/types.go | 126 + .../apis/extender/v1/zz_generated.deepcopy.go | 339 + .../kubernetes/pkg/scheduler/core/BUILD | 17 +- .../kubernetes/pkg/scheduler/core/extender.go | 105 +- .../pkg/scheduler/core/generic_scheduler.go | 577 +- .../kubernetes/pkg/scheduler/eventhandlers.go | 118 +- .../kubernetes/pkg/scheduler/factory.go | 537 + .../kubernetes/pkg/scheduler/factory/BUILD | 98 - .../pkg/scheduler/factory/factory.go | 651 - .../pkg/scheduler/framework/plugins/BUILD | 81 + .../framework/plugins/default_registry.go | 337 + .../plugins/defaultpodtopologyspread/BUILD | 45 + .../default_pod_topology_spread.go | 88 + .../framework/plugins/imagelocality/BUILD | 46 + .../plugins/imagelocality/image_locality.go | 65 + .../framework/plugins/interpodaffinity/BUILD | 48 + .../interpodaffinity/interpod_affinity.go | 91 + .../framework/plugins/migration/BUILD | 37 + .../framework/plugins/migration/utils.go | 121 + .../framework/plugins/nodeaffinity/BUILD | 46 + .../plugins/nodeaffinity/node_affinity.go | 81 + .../framework/plugins/nodelabel/BUILD | 45 + .../framework/plugins/nodelabel/node_label.go | 118 + .../framework/plugins/nodename/BUILD | 43 + .../framework/plugins/nodename/node_name.go | 52 + .../framework/plugins/nodeports/BUILD | 42 + .../framework/plugins/nodeports/node_ports.go | 52 + .../plugins/nodepreferavoidpods}/BUILD | 21 +- .../node_prefer_avoid_pods.go | 66 + .../framework/plugins/noderesources/BUILD | 65 + .../noderesources/balanced_allocation.go | 66 + .../framework/plugins/noderesources/fit.go | 57 + .../plugins/noderesources/least_allocated.go | 65 + .../plugins/noderesources/most_allocated.go | 65 + .../plugins/noderesources/test_util.go | 39 + .../framework/plugins/nodeunschedulable/BUILD | 42 + .../nodeunschedulable/node_unschedulable.go | 54 + .../framework/plugins/nodevolumelimits/BUILD | 70 + .../plugins/nodevolumelimits/azure.go | 62 + .../plugins/nodevolumelimits/cinder.go | 61 + .../framework/plugins/nodevolumelimits/csi.go | 62 + .../nodevolumelimits/csinode_helper.go | 32 + .../framework/plugins/nodevolumelimits/ebs.go | 62 + .../framework/plugins/nodevolumelimits/gce.go | 62 + .../framework/plugins/podtopologyspread/BUILD | 45 + .../podtopologyspread/pod_topology_spread.go | 87 + .../plugins/requestedtocapacityratio/BUILD | 43 + 
.../requested_to_capacity_ratio.go | 80 + .../framework/plugins/serviceaffinity/BUILD | 48 + .../serviceaffinity/service_affinity.go | 117 + .../framework/plugins/tainttoleration/BUILD | 45 + .../tainttoleration/taint_toleration.go | 81 + .../framework/plugins/volumebinding/BUILD | 30 + .../plugins/volumebinding/volume_binding.go | 56 + .../plugins/volumerestrictions/BUILD | 42 + .../volumerestrictions/volume_restrictions.go | 53 + .../framework/plugins/volumezone/BUILD | 45 + .../plugins/volumezone/volume_zone.go | 61 + .../pkg/scheduler/framework/v1alpha1/BUILD | 16 +- .../scheduler/framework/v1alpha1/context.go | 94 - .../framework/v1alpha1/cycle_state.go | 130 + .../scheduler/framework/v1alpha1/framework.go | 806 +- .../scheduler/framework/v1alpha1/interface.go | 193 +- .../framework/v1alpha1/metrics_recorder.go | 115 + .../scheduler/framework/v1alpha1/registry.go | 20 +- .../framework/v1alpha1/waiting_pods_map.go | 68 +- .../pkg/scheduler/internal/cache/BUILD | 9 +- .../pkg/scheduler/internal/cache/cache.go | 104 +- .../pkg/scheduler/internal/cache/interface.go | 27 +- .../pkg/scheduler/internal/cache/node_tree.go | 58 +- .../dbus => scheduler/internal/heap}/BUILD | 24 +- .../scheduler/{util => internal/heap}/heap.go | 40 +- .../pkg/scheduler/internal/queue/BUILD | 10 +- .../pkg/scheduler/internal/queue/events.go | 72 + .../internal/queue/scheduling_queue.go | 231 +- .../kubernetes/pkg/scheduler/listers/BUILD | 30 + .../pkg/scheduler/listers/listers.go | 51 + .../kubernetes/pkg/scheduler/metrics/BUILD | 3 +- .../pkg/scheduler/metrics/metrics.go | 212 +- .../kubernetes/pkg/scheduler/nodeinfo/BUILD | 10 +- .../pkg/scheduler/nodeinfo/node_info.go | 24 - .../pkg/scheduler/nodeinfo/snapshot/BUILD | 41 + .../scheduler/nodeinfo/snapshot/snapshot.go | 196 + .../kubernetes/pkg/scheduler/nodeinfo/util.go | 78 - .../kubernetes/pkg/scheduler/scheduler.go | 518 +- .../kubernetes/pkg/scheduler/util/BUILD | 11 +- .../kubernetes/pkg/scheduler/util/utils.go | 68 +- .../scheduler/volumebinder/volume_binder.go | 5 +- .../pkg/security/apparmor/helpers.go | 6 +- .../pkg/security/apparmor/validate.go | 14 +- .../podsecuritypolicy/seccomp/strategy.go | 4 +- .../sysctl/mustmatchpatterns.go | 2 - .../kubernetes/pkg/serviceaccount/legacy.go | 2 +- .../util/async/bounded_frequency_runner.go | 110 +- .../kubernetes/pkg/util/bandwidth/BUILD | 9 + .../kubernetes/pkg/util/configz/configz.go | 1 + .../k8s.io/kubernetes/pkg/util/dbus/dbus.go | 133 - .../kubernetes/pkg/util/dbus/fake_dbus.go | 140 - .../kubernetes/pkg/util/ebtables/ebtables.go | 2 +- .../k8s.io/kubernetes/pkg/util/env/env.go | 6 + .../k8s.io/kubernetes/pkg/util/flock/BUILD | 6 + .../k8s.io/kubernetes/pkg/util/iptables/BUILD | 18 +- .../kubernetes/pkg/util/iptables/OWNERS | 1 + .../kubernetes/pkg/util/iptables/iptables.go | 242 +- .../k8s.io/kubernetes/pkg/util/ipvs/BUILD | 8 + .../k8s.io/kubernetes/pkg/util/mount/BUILD | 95 - .../k8s.io/kubernetes/pkg/util/mount/exec.go | 53 - .../k8s.io/kubernetes/pkg/util/node/node.go | 75 +- .../k8s.io/kubernetes/pkg/util/oom/BUILD | 8 + .../k8s.io/kubernetes/pkg/util/pod/pod.go | 4 +- .../k8s.io/kubernetes/pkg/util/procfs/BUILD | 7 + .../kubernetes/pkg/util/removeall/BUILD | 4 +- .../pkg/util/removeall/removeall.go | 2 +- .../k8s.io/kubernetes/pkg/util/resizefs/BUILD | 26 +- .../pkg/util/resizefs/resizefs_linux.go | 6 +- .../pkg/util/resizefs/resizefs_unsupported.go | 2 +- .../k8s.io/kubernetes/pkg/util/rlimit/BUILD | 3 + .../k8s.io/kubernetes/pkg/util/selinux/BUILD | 3 + .../kubernetes/pkg/version/.gitattributes 
| 1 - .../k8s.io/kubernetes/pkg/version/BUILD | 34 - .../k8s.io/kubernetes/pkg/version/base.go | 63 - .../k8s.io/kubernetes/pkg/version/def.bzl | 39 - .../k8s.io/kubernetes/pkg/version/version.go | 42 - .../vendor/k8s.io/kubernetes/pkg/volume/BUILD | 8 +- .../k8s.io/kubernetes/pkg/volume/awsebs/BUILD | 4 +- .../kubernetes/pkg/volume/awsebs/attacher.go | 4 +- .../pkg/volume/awsebs/attacher_linux.go | 1 + .../pkg/volume/awsebs/attacher_unsupported.go | 1 + .../pkg/volume/awsebs/attacher_windows.go | 3 +- .../kubernetes/pkg/volume/awsebs/aws_ebs.go | 45 +- .../pkg/volume/awsebs/aws_ebs_block.go | 51 +- .../kubernetes/pkg/volume/awsebs/aws_util.go | 50 +- .../kubernetes/pkg/volume/azure_dd/BUILD | 44 +- .../pkg/volume/azure_dd/attacher.go | 2 +- .../pkg/volume/azure_dd/azure_common_linux.go | 6 +- .../azure_dd/azure_common_unsupported.go | 6 +- .../volume/azure_dd/azure_common_windows.go | 16 +- .../pkg/volume/azure_dd/azure_dd.go | 7 - .../pkg/volume/azure_dd/azure_dd_block.go | 22 +- .../pkg/volume/azure_dd/azure_mounter.go | 5 +- .../pkg/volume/azure_dd/azure_provision.go | 26 +- .../kubernetes/pkg/volume/azure_file/BUILD | 16 +- .../pkg/volume/azure_file/azure_file.go | 14 +- .../k8s.io/kubernetes/pkg/volume/cephfs/BUILD | 4 +- .../kubernetes/pkg/volume/cephfs/cephfs.go | 13 +- .../k8s.io/kubernetes/pkg/volume/cinder/BUILD | 4 +- .../kubernetes/pkg/volume/cinder/attacher.go | 5 +- .../kubernetes/pkg/volume/cinder/cinder.go | 16 +- .../pkg/volume/cinder/cinder_block.go | 30 +- .../kubernetes/pkg/volume/configmap/BUILD | 2 +- .../kubernetes/pkg/volume/configmap/OWNERS | 1 + .../pkg/volume/configmap/configmap.go | 13 +- .../k8s.io/kubernetes/pkg/volume/csi/BUILD | 2 - .../k8s.io/kubernetes/pkg/volume/csi/OWNERS | 1 + .../kubernetes/pkg/volume/csi/csi_attacher.go | 181 +- .../kubernetes/pkg/volume/csi/csi_block.go | 189 +- .../kubernetes/pkg/volume/csi/csi_client.go | 440 +- .../kubernetes/pkg/volume/csi/csi_mounter.go | 16 +- .../kubernetes/pkg/volume/csi/csi_plugin.go | 89 +- .../kubernetes/pkg/volume/csi/csiv0/csi.pb.go | 5007 ------- .../pkg/volume/csi/nodeinfomanager/BUILD | 4 +- .../csi/nodeinfomanager/nodeinfomanager.go | 50 +- .../kubernetes/pkg/volume/csimigration/BUILD | 45 + .../pkg/volume/csimigration/plugin_manager.go | 156 + .../kubernetes/pkg/volume/downwardapi/OWNERS | 1 + .../pkg/volume/downwardapi/downwardapi.go | 4 - .../kubernetes/pkg/volume/emptydir/BUILD | 18 +- .../kubernetes/pkg/volume/emptydir/OWNERS | 1 + .../pkg/volume/emptydir/empty_dir.go | 13 +- .../pkg/volume/emptydir/empty_dir_linux.go | 4 +- .../volume/emptydir/empty_dir_unsupported.go | 5 +- .../k8s.io/kubernetes/pkg/volume/fc/BUILD | 6 +- .../kubernetes/pkg/volume/fc/attacher.go | 7 +- .../kubernetes/pkg/volume/fc/disk_manager.go | 3 +- .../k8s.io/kubernetes/pkg/volume/fc/fc.go | 31 +- .../kubernetes/pkg/volume/fc/fc_util.go | 7 +- .../kubernetes/pkg/volume/flexvolume/BUILD | 4 +- .../kubernetes/pkg/volume/flexvolume/OWNERS | 2 +- .../volume/flexvolume/attacher-defaults.go | 2 +- .../volume/flexvolume/detacher-defaults.go | 5 +- .../pkg/volume/flexvolume/detacher.go | 5 +- .../pkg/volume/flexvolume/driver-call.go | 1 - .../pkg/volume/flexvolume/plugin.go | 6 +- .../volume/flexvolume/unmounter-defaults.go | 2 +- .../pkg/volume/flexvolume/unmounter.go | 5 +- .../kubernetes/pkg/volume/flexvolume/util.go | 3 +- .../pkg/volume/flexvolume/volume.go | 5 +- .../kubernetes/pkg/volume/flocker/BUILD | 4 +- .../kubernetes/pkg/volume/flocker/OWNERS | 1 - .../kubernetes/pkg/volume/flocker/flocker.go | 12 +- 
.../k8s.io/kubernetes/pkg/volume/gcepd/BUILD | 4 +- .../k8s.io/kubernetes/pkg/volume/gcepd/OWNERS | 1 - .../kubernetes/pkg/volume/gcepd/attacher.go | 18 +- .../kubernetes/pkg/volume/gcepd/gce_pd.go | 41 +- .../pkg/volume/gcepd/gce_pd_block.go | 28 +- .../kubernetes/pkg/volume/gcepd/gce_util.go | 9 +- .../pkg/volume/git_repo/git_repo.go | 4 - .../kubernetes/pkg/volume/glusterfs/BUILD | 4 +- .../pkg/volume/glusterfs/glusterfs.go | 34 +- .../kubernetes/pkg/volume/hostpath/BUILD | 2 +- .../pkg/volume/hostpath/host_path.go | 11 +- .../k8s.io/kubernetes/pkg/volume/iscsi/BUILD | 5 +- .../kubernetes/pkg/volume/iscsi/attacher.go | 9 +- .../pkg/volume/iscsi/disk_manager.go | 3 +- .../kubernetes/pkg/volume/iscsi/iscsi.go | 45 +- .../kubernetes/pkg/volume/iscsi/iscsi_util.go | 103 +- .../k8s.io/kubernetes/pkg/volume/local/BUILD | 28 +- .../kubernetes/pkg/volume/local/local.go | 59 +- .../k8s.io/kubernetes/pkg/volume/nfs/BUILD | 4 +- .../k8s.io/kubernetes/pkg/volume/nfs/nfs.go | 19 +- .../pkg/volume/noop_expandable_plugin.go | 4 - .../k8s.io/kubernetes/pkg/volume/plugins.go | 48 +- .../kubernetes/pkg/volume/portworx/BUILD | 4 +- .../pkg/volume/portworx/portworx.go | 13 +- .../pkg/volume/projected/projected.go | 4 - .../kubernetes/pkg/volume/quobyte/BUILD | 6 +- .../kubernetes/pkg/volume/quobyte/quobyte.go | 19 +- .../pkg/volume/quobyte/quobyte_util.go | 2 +- .../k8s.io/kubernetes/pkg/volume/rbd/BUILD | 5 +- .../kubernetes/pkg/volume/rbd/attacher.go | 7 +- .../kubernetes/pkg/volume/rbd/disk_manager.go | 7 +- .../k8s.io/kubernetes/pkg/volume/rbd/rbd.go | 32 +- .../kubernetes/pkg/volume/rbd/rbd_util.go | 57 +- .../kubernetes/pkg/volume/scaleio/BUILD | 5 +- .../pkg/volume/scaleio/sio_client.go | 8 +- .../kubernetes/pkg/volume/scaleio/sio_mgr.go | 9 +- .../pkg/volume/scaleio/sio_plugin.go | 4 - .../pkg/volume/scaleio/sio_volume.go | 7 +- .../k8s.io/kubernetes/pkg/volume/secret/BUILD | 2 +- .../kubernetes/pkg/volume/secret/OWNERS | 1 + .../kubernetes/pkg/volume/secret/secret.go | 13 +- .../kubernetes/pkg/volume/storageos/BUILD | 6 +- .../kubernetes/pkg/volume/storageos/OWNERS | 6 - .../pkg/volume/storageos/storageos.go | 20 +- .../pkg/volume/storageos/storageos_util.go | 17 +- .../k8s.io/kubernetes/pkg/volume/util/BUILD | 7 +- .../k8s.io/kubernetes/pkg/volume/util/OWNERS | 1 - .../pkg/volume/util/atomic_writer.go | 2 +- .../kubernetes/pkg/volume/util/exec/BUILD | 74 - .../pkg/volume/util/exec/exec_mount.go | 101 - .../util/exec/exec_mount_unsupported.go | 55 - .../kubernetes/pkg/volume/util/fs/BUILD | 7 + .../kubernetes/pkg/volume/util/fs/fs.go | 4 +- .../kubernetes/pkg/volume/util/fsquota/BUILD | 19 +- .../pkg/volume/util/fsquota/common/BUILD | 3 + .../pkg/volume/util/fsquota/quota.go | 3 +- .../pkg/volume/util/fsquota/quota_linux.go | 7 +- .../volume/util/fsquota/quota_unsupported.go | 4 +- .../kubernetes/pkg/volume/util/hostutil/BUILD | 10 +- .../pkg/volume/util/hostutil/fake_hostutil.go | 2 +- .../pkg/volume/util/hostutil/hostutil.go | 2 +- .../volume/util/hostutil/hostutil_linux.go | 4 +- .../util/hostutil/hostutil_unsupported.go | 2 +- .../volume/util/hostutil/hostutil_windows.go | 3 +- .../pkg/volume/util/operationexecutor/BUILD | 10 +- .../util/operationexecutor/fakegenerator.go | 5 + .../operationexecutor/operation_executor.go | 58 +- .../operationexecutor/operation_generator.go | 613 +- .../kubernetes/pkg/volume/util/resize_util.go | 3 +- .../kubernetes/pkg/volume/util/subpath/BUILD | 35 +- .../pkg/volume/util/subpath/subpath_linux.go | 10 +- .../util/subpath/subpath_unsupported.go | 2 +- 
.../volume/util/subpath/subpath_windows.go | 2 +- .../k8s.io/kubernetes/pkg/volume/util/util.go | 96 +- .../pkg/volume/util/volumepathhandler/BUILD | 12 +- .../volumepathhandler/volume_path_handler.go | 246 +- .../volume_path_handler_linux.go | 159 +- .../volume_path_handler_unsupported.go | 15 +- .../k8s.io/kubernetes/pkg/volume/volume.go | 48 +- .../kubernetes/pkg/volume/volume_linux.go | 2 + .../pkg/volume/vsphere_volume/BUILD | 4 +- .../pkg/volume/vsphere_volume/OWNERS | 4 +- .../pkg/volume/vsphere_volume/attacher.go | 9 +- .../volume/vsphere_volume/vsphere_volume.go | 13 +- .../vsphere_volume/vsphere_volume_block.go | 30 +- .../vsphere_volume_util_linux.go | 3 +- .../vsphere_volume_util_unsupported.go | 1 + .../vsphere_volume_util_windows.go | 1 + .../kubernetes/pkg/windows/service/service.go | 2 +- .../test/utils/admission_webhook.go | 60 +- .../k8s.io/kubernetes/test/utils/audit.go | 8 +- .../kubernetes/test/utils/audit_dynamic.go | 2 - .../k8s.io/kubernetes/test/utils/runners.go | 98 +- .../k8s.io/legacy-cloud-providers/aws/BUILD | 1 - .../k8s.io/legacy-cloud-providers/aws/aws.go | 78 +- .../legacy-cloud-providers/aws/aws_fakes.go | 17 +- .../legacy-cloud-providers/aws/aws_metrics.go | 8 +- .../k8s.io/legacy-cloud-providers/azure/BUILD | 4 +- .../legacy-cloud-providers/azure/azure.go | 17 + .../azure/azure_backoff.go | 4 +- .../azure/azure_blobDiskController.go | 5 +- .../azure/azure_cache.go | 74 +- .../azure/azure_client.go | 156 +- .../azure/azure_controller_common.go | 103 +- .../azure/azure_controller_standard.go | 26 +- .../azure/azure_controller_vmss.go | 32 +- .../azure/azure_fakes.go | 15 +- .../azure/azure_instance_metadata.go | 7 +- .../azure/azure_instances.go | 6 +- .../azure/azure_loadbalancer.go | 62 +- .../azure/azure_managedDiskController.go | 18 + .../azure/azure_metrics.go | 17 +- .../azure/azure_routes.go | 32 +- .../azure/azure_standard.go | 19 +- .../azure/azure_utils.go | 71 + .../azure/azure_vmsets.go | 4 +- .../azure/azure_vmss.go | 177 +- .../azure/azure_vmss_cache.go | 183 +- .../azure/azure_wrap.go | 20 +- .../azure/azure_zones.go | 2 +- .../k8s.io/legacy-cloud-providers/gce/gce.go | 8 +- .../legacy-cloud-providers/gce/gce_alpha.go | 3 + .../gce/gce_annotations.go | 15 + .../gce/gce_loadbalancer.go | 27 +- .../gce/gce_loadbalancer_external.go | 2 +- .../gce/gce_loadbalancer_internal.go | 178 +- .../legacy-cloud-providers/gce/gce_util.go | 2 +- .../legacy-cloud-providers/openstack/BUILD | 8 +- .../openstack/metadata.go | 4 +- .../openstack/openstack.go | 1 - .../openstack/openstack_loadbalancer.go | 4 +- .../openstack/openstack_volumes.go | 7 +- .../openstack/util/mount/BUILD | 43 - .../openstack/util/mount/mount.go | 380 - .../openstack/util/mount/mount_linux.go | 1345 -- .../openstack/util/mount/mount_unsupported.go | 162 - .../openstack/util/mount/mount_windows.go | 613 - .../vsphere/vclib/BUILD | 1 - .../vsphere/vclib/diskmanagers/vdm.go | 16 +- .../vsphere/vclib/vsphere_metrics.go | 10 +- .../pkg/util => utils}/mount/OWNERS | 0 .../pkg/util => utils}/mount/doc.go | 2 +- .../fake.go => utils/mount/fake_mounter.go} | 36 +- .../pkg/util => utils}/mount/mount.go | 26 +- .../mount/mount_helper_common.go | 0 .../util => utils}/mount/mount_helper_unix.go | 0 .../mount/mount_helper_windows.go | 3 + .../pkg/util => utils}/mount/mount_linux.go | 11 +- .../util => utils}/mount/mount_unsupported.go | 0 .../pkg/util => utils}/mount/mount_windows.go | 9 +- .../vendor/k8s.io/utils/net/net.go | 55 + cluster-autoscaler/vendor/modules.txt | 294 +- 
.../fieldpath/element.go | 25 +- .../fieldpath/fromvalue.go | 5 +- .../structured-merge-diff/fieldpath/path.go | 6 +- .../fieldpath/pathelementmap.go | 85 + .../fieldpath/serialize-pe.go | 31 +- .../structured-merge-diff/merge/update.go | 22 +- .../structured-merge-diff/schema/elements.go | 41 +- .../structured-merge-diff/typed/helpers.go | 29 +- .../structured-merge-diff/typed/merge.go | 72 +- .../structured-merge-diff/typed/remove.go | 6 +- .../structured-merge-diff/typed/typed.go | 19 +- .../structured-merge-diff/typed/validate.go | 31 +- .../structured-merge-diff/value/value.go | 279 +- 2102 files changed, 139697 insertions(+), 51711 deletions(-) create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicies.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go create mode 100644 cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go create mode 100644 cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/Protobuild.toml delete mode 100644 cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/etcd/raft/progress.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/etcd/raft/util.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen.go delete mode 100644 cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go delete mode 100644 cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go create mode 100644 cluster-autoscaler/vendor/github.com/godbus/dbus/auth_anonymous.go create mode 100644 cluster-autoscaler/vendor/github.com/godbus/dbus/conn_unix.go create mode 100644 cluster-autoscaler/vendor/github.com/godbus/dbus/conn_windows.go create mode 100644 cluster-autoscaler/vendor/github.com/godbus/dbus/go.mod create mode 100644 cluster-autoscaler/vendor/github.com/godbus/dbus/transport_nonce_tcp.go create mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh rename cluster-autoscaler/vendor/github.com/{pborman/uuid => mindprince/gonvml}/.travis.yml (54%) create mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go rename cluster-autoscaler/vendor/github.com/{pborman/uuid => munnerz/goautoneg}/LICENSE (53%) create mode 100644 
cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go rename cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/{cgroup_windows.go => cgroup_unsupported.go} (89%) delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTING.md delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/README.md delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/dce.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/doc.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/go.mod delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/go.sum delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/hash.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/marshal.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/node.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/sql.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/time.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/util.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/uuid.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/version1.go delete mode 100644 cluster-autoscaler/vendor/github.com/pborman/uuid/version4.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info.go rename cluster-autoscaler/vendor/github.com/{coreos/go-systemd/util/util_stub.go => prometheus/client_golang/prometheus/build_info_pre_1.12.go} (57%) delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/go.sum create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/fs/fs.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/parse.go delete mode 100644 
cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/net_unix.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/nfs.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/parse.go delete mode 100644 cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/xfs.go create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/float32_slice.go create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/float64_slice.go create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/go.mod create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/go.sum create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/int32_slice.go create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/int64_slice.go create mode 100644 cluster-autoscaler/vendor/github.com/spf13/pflag/string_to_int64.go create mode 100644 cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_order.go create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/CHANGELOG.md create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/ioctl_linux.go create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/LICENSE (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/NOTICE (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/auth/authpb/auth.pb.go (60%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/auth/authpb/auth.proto (89%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/README.md rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/auth.go (74%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/balancer.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/connectivity/connectivity.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/picker/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/picker/err.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/picker/picker.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/picker/roundrobin_balanced.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/balancer/resolver/endpoint/endpoint.go (93%) rename cluster-autoscaler/vendor/{github.com/coreos => 
go.etcd.io}/etcd/clientv3/balancer/utils.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/client.go (92%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/cluster.go (69%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/compact_op.go (96%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/compare.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/config.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/credentials/credentials.go (67%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/doc.go (82%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/kv.go (99%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/lease.go (92%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/logger.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/maintenance.go (99%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/op.go (90%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/options.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/retry.go (97%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/retry_interceptor.go (99%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/sort.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/txn.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/utils.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/clientv3/watch.go (99%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/api/v3rpc/rpctypes/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/api/v3rpc/rpctypes/error.go (88%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/api/v3rpc/rpctypes/md.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/etcdserver.pb.go (75%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/etcdserver.proto (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/raft_internal.pb.go (63%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/raft_internal.proto (97%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/rpc.pb.go (62%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/etcdserver/etcdserverpb/rpc.proto (88%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/mvcc/mvccpb/kv.pb.go (75%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/mvcc/mvccpb/kv.proto (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/discard_logger.go (100%) rename 
cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/log_level.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/logger.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/merge_logger.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/package_logger.go (97%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/zap.go (88%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/zap_grpc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/zap_journal.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/logutil/zap_raft.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/systemd/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/systemd/journal.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/tlsutil/cipher_suites.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/tlsutil/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/tlsutil/tlsutil.go (99%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/keepalive_listener.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/limit_listen.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/listener.go (51%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/listener_tls.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_conn.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_dialer.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_listener.go (96%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_transport.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/tls.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/transport.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/unix_listener.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/doc.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/id.go (98%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/set.go (89%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/slice.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/urls.go (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/urlsmap.go (100%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/OWNERS rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/README.md (91%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/bootstrap.go 
create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/confchange.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/restore.go rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/design.md (100%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/doc.go (96%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/log.go (92%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/log_unstable.go (96%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/logger.go (96%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/node.go (65%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/joint.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/majority.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/quorum.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/raft.go (59%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/raftpb/raft.pb.go (52%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/rawnode.go (60%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/read_only.go (79%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/status.go (68%) rename cluster-autoscaler/vendor/{github.com/coreos => go.etcd.io}/etcd/raft/storage.go (98%) create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/inflights.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/progress.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/state.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/tracker.go create mode 100644 cluster-autoscaler/vendor/go.etcd.io/etcd/raft/util.go rename cluster-autoscaler/vendor/go.uber.org/{atomic/error.go => zap/global_go112.go} (56%) create mode 100644 cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go rename cluster-autoscaler/vendor/golang.org/x/net/idna/{idna.go => idna10.0.0.go} (99%) create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/idna/idna9.0.0.go rename cluster-autoscaler/vendor/golang.org/x/net/idna/{tables.go => tables10.0.0.go} (99%) create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_aix.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_stub.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/reflect.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_const_unix.go rename cluster-autoscaler/vendor/golang.org/x/net/internal/socket/{sys_go1_12_darwin.go 
=> sys_linkname.go} (98%) create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/defs_aix.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_8.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_9.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_aix.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/defs_aix.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_aix.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/net/proxy/dial.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl rename cluster-autoscaler/vendor/golang.org/x/sys/unix/{openbsd_pledge.go => pledge_openbsd.go} (98%) create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go rename cluster-autoscaler/vendor/golang.org/x/sys/unix/{openbsd_unveil.go => unveil_openbsd.go} (98%) create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 
cluster-autoscaler/vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 cluster-autoscaler/vendor/golang.org/x/text/width/tables11.0.0.go delete mode 100644 cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go delete mode 100644 cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go delete mode 100644 cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go delete mode 100644 cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go delete mode 100644 cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_taints.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/BUILD rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/version => api/discovery/v1beta1}/doc.go (73%) create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/BUILD rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/apis/authentication/v1beta1/conversion.go => apimachinery/pkg/apis/meta/internalversion/scheme/doc.go} (73%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go delete 
mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/BUILD rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/apis/authorization/v1/conversion.go => apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go} (66%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/nodeinfo/snapshot.go => apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go} (57%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => apiserver/pkg/apis/apiserver}/v1/BUILD (67%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => apiserver/pkg/apis/apiserver}/v1/register.go (73%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go create mode 
100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util/dbus => client-go/kubernetes/typed/discovery/v1beta1}/doc.go (73%) create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go create mode 100644 
cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/csinode.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/tools/events/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/util/cert/server_inspection.go create mode 100644 cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go create mode 100644 cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/config/validation/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/config/validation/validation.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/metrics/collector.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/metrics/desc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/metrics/http.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/metrics/labels.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/client/metrics/prometheus => component-base/metrics/prometheus/restclient}/BUILD (57%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/client/metrics/prometheus/prometheus.go => component-base/metrics/prometheus/restclient/metrics.go} (66%) create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg => component-base}/version/verflag/BUILD (63%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg => component-base}/version/verflag/verflag.go (98%) create mode 100644 
cluster-autoscaler/vendor/k8s.io/kube-scheduler/LICENSE create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/BUILD rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => kube-scheduler/config/v1}/doc.go (83%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => kube-scheduler/config/v1}/register.go (56%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => kube-scheduler/config}/v1/types.go (69%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/scheduler/api => kube-scheduler/config/v1}/zz_generated.deepcopy.go (54%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/types.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/zz_generated.deepcopy.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/deviceplugin/v1beta1/BUILD (85%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/deviceplugin/v1beta1/api.pb.go (99%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/deviceplugin/v1beta1/api.proto (99%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/deviceplugin/v1beta1/constants.go (100%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/pluginregistration/v1/BUILD (82%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/pluginregistration/v1/api.pb.go (100%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/pluginregistration/v1/api.proto (100%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/kubelet => kubelet/pkg}/apis/pluginregistration/v1/constants.go (100%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/defaults.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.defaults.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/{ => v1}/helpers.go (67%) rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/{volume/csi/csiv0 => controller/volume/scheduling/metrics}/BUILD (54%) rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/{scheduler_bind_cache_metrics.go => metrics/metrics.go} (94%) delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology_hints.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/{socketmask => bitmask}/BUILD (87%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go delete mode 100644 
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/socketmask.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_others.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/{apis/authorization/v1beta1/conversion.go => kubelet/kubelet_node_status_windows.go} (61%) delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/proxier_health.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/{healthcheck.go => service_health.go} (51%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/testing_helper.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/{factory/plugins.go => algorithm_factory.go} (59%) delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/latest.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/zz_generated.deepcopy.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/validation.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/{api/types.go => apis/config/legacy_types.go} (68%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/doc.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/register.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.conversion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.defaults.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/{api => apis/config}/validation/BUILD (53%) create mode 100644 
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/BUILD rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/{api => apis/extender}/v1/doc.go (84%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/types.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/default_registry.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/utils.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/node_label.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/{kubelet/mountpod => scheduler/framework/plugins/nodepreferavoidpods}/BUILD (55%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go create mode 100644 
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/context.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/cycle_state.go 
create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/metrics_recorder.go rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/{util/dbus => scheduler/internal/heap}/BUILD (63%) rename cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/{util => internal/heap}/heap.go (86%) create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/events.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/snapshot.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/util.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/dbus.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/.gitattributes delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/base.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/def.bzl delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/version.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/plugin_manager.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/OWNERS delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount_unsupported.go create mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/BUILD delete mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_unsupported.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_windows.go rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/OWNERS (100%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/doc.go (91%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util/mount/fake.go => utils/mount/fake_mounter.go} (85%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount.go (91%) rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_helper_common.go (100%) rename 
cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_helper_unix.go (100%)
 rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_helper_windows.go (93%)
 rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_linux.go (97%)
 rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_unsupported.go (100%)
 rename cluster-autoscaler/vendor/k8s.io/{kubernetes/pkg/util => utils}/mount/mount_windows.go (95%)
 create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/pathelementmap.go

diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index c0af36976a0..d9505c5e05a 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -8,30 +8,29 @@ go 1.12
 
 require (
 	cloud.google.com/go v0.38.0
-	github.com/Azure/azure-sdk-for-go v32.5.0+incompatible
+	github.com/Azure/azure-sdk-for-go v35.0.0+incompatible
 	github.com/Azure/go-autorest/autorest v0.9.0
 	github.com/Azure/go-autorest/autorest/adal v0.5.0
 	github.com/Azure/go-autorest/autorest/to v0.2.0
 	github.com/aws/aws-sdk-go v1.23.18
-	github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4
+	github.com/ghodss/yaml v1.0.0
 	github.com/google/go-querystring v1.0.0
 	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
-	github.com/json-iterator/go v1.1.7
-	github.com/prometheus/client_golang v0.9.2
+	github.com/json-iterator/go v1.1.8
 	github.com/satori/go.uuid v1.2.0
-	github.com/spf13/pflag v1.0.3
-	github.com/stretchr/testify v1.3.0
-	golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
+	github.com/spf13/pflag v1.0.5
+	github.com/stretchr/testify v1.4.0
+	golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 	google.golang.org/api v0.6.1-0.20190607001116-5213b8090861
 	gopkg.in/gcfg.v1 v1.2.0
-	gopkg.in/yaml.v2 v2.2.2
+	gopkg.in/yaml.v2 v2.2.4
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
 	k8s.io/client-go v0.0.0
 	k8s.io/cloud-provider v0.0.0
 	k8s.io/component-base v0.0.0
-	k8s.io/klog v0.4.0
+	k8s.io/klog v1.0.0
 	k8s.io/kubernetes v0.0.0
 	k8s.io/legacy-cloud-providers v0.0.0
 )
@@ -39,7 +38,7 @@ require (
 replace (
 	bitbucket.org/bertimus9/systemstat => bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690
 	cloud.google.com/go => cloud.google.com/go v0.38.0
-	github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v32.5.0+incompatible
+	github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v35.0.0+incompatible
 	github.com/Azure/go-ansiterm => github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
 	github.com/Azure/go-autorest/autorest => github.com/Azure/go-autorest/autorest v0.9.0
 	github.com/Azure/go-autorest/autorest/adal => github.com/Azure/go-autorest/autorest/adal v0.5.0
@@ -55,22 +54,32 @@ replace (
 	github.com/JeffAshton/win_pdh => github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab
 	github.com/MakeNowJust/heredoc => github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd
 	github.com/Microsoft/go-winio => github.com/Microsoft/go-winio v0.4.11
-	github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.8.6
+	github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d
 	github.com/NYTimes/gziphandler => github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46
+	github.com/OpenPeeDeeP/depguard => github.com/OpenPeeDeeP/depguard v1.0.1
 	github.com/PuerkitoBio/purell => github.com/PuerkitoBio/purell v1.1.1
github.com/PuerkitoBio/urlesc => github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/Rican7/retry => github.com/Rican7/retry v0.1.0 + github.com/StackExchange/wmi => github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 + github.com/agnivade/levenshtein => github.com/agnivade/levenshtein v1.0.1 + github.com/alecthomas/template => github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc + github.com/alecthomas/units => github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf + github.com/andreyvit/diff => github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 + github.com/anmitsu/go-shlex => github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/armon/circbuf => github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e github.com/armon/consul-api => github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 github.com/asaskevich/govalidator => github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a github.com/auth0/go-jwt-middleware => github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7 github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.23.18 - github.com/bazelbuild/bazel-gazelle => github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e - github.com/bazelbuild/buildtools => github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/bazelbuild/bazel-gazelle => github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798 + github.com/bazelbuild/buildtools => github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89 + github.com/bazelbuild/rules_go => github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab + github.com/beorn7/perks => github.com/beorn7/perks v1.0.0 + github.com/bgentry/speakeasy => github.com/bgentry/speakeasy v0.1.0 github.com/bifurcation/mint => github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115 github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 + github.com/bradfitz/go-smtpd => github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625 github.com/caddyserver/caddy => github.com/caddyserver/caddy v1.0.3 github.com/cenkalti/backoff => github.com/cenkalti/backoff v2.1.1+incompatible github.com/cespare/prettybench => github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c @@ -78,24 +87,24 @@ replace ( github.com/checkpoint-restore/go-criu => github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/cheekybits/genny => github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 github.com/client9/misspell => github.com/client9/misspell v0.3.4 - github.com/cloudflare/cfssl => github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf github.com/clusterhq/flocker-go => github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 + github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa github.com/codegangsta/negroni => github.com/codegangsta/negroni v1.0.0 - github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.1.0 + github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.2.0 github.com/containerd/console => github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa github.com/containerd/containerd => github.com/containerd/containerd 
v1.0.2 github.com/containerd/typeurl => github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 github.com/containernetworking/cni => github.com/containernetworking/cni v0.7.1 - github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.2 - github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.1-coreos.6 - github.com/coreos/etcd => github.com/coreos/etcd v3.3.15+incompatible + github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.4 + github.com/coreos/etcd => github.com/coreos/etcd v3.3.10+incompatible github.com/coreos/go-etcd => github.com/coreos/go-etcd v2.0.0+incompatible github.com/coreos/go-oidc => github.com/coreos/go-oidc v2.1.0+incompatible github.com/coreos/go-semver => github.com/coreos/go-semver v0.3.0 - github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 + github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea github.com/coreos/rkt => github.com/coreos/rkt v1.30.0 github.com/cpuguy83/go-md2man => github.com/cpuguy83/go-md2man v1.0.10 + github.com/creack/pty => github.com/creack/pty v1.1.7 github.com/cyphar/filepath-securejoin => github.com/cyphar/filepath-securejoin v0.2.2 github.com/davecgh/go-spew => github.com/davecgh/go-spew v1.1.1 github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd @@ -104,8 +113,8 @@ replace ( github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker => github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 github.com/docker/go-connections => github.com/docker/go-connections v0.3.0 - github.com/docker/go-units => github.com/docker/go-units v0.3.3 - github.com/docker/libnetwork => github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789 + github.com/docker/go-units => github.com/docker/go-units v0.4.0 + github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 github.com/docker/spdystream => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.0 github.com/elazarl/goproxy => github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e @@ -114,38 +123,72 @@ replace ( github.com/evanphx/json-patch => github.com/evanphx/json-patch v4.2.0+incompatible github.com/exponent-io/jsonpath => github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d github.com/fatih/camelcase => github.com/fatih/camelcase v1.0.0 - github.com/fatih/color => github.com/fatih/color v1.6.0 + github.com/fatih/color => github.com/fatih/color v1.7.0 github.com/flynn/go-shlex => github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 github.com/fsnotify/fsnotify => github.com/fsnotify/fsnotify v1.4.7 - github.com/ghodss/yaml => github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 - github.com/globalsign/mgo => github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 + github.com/ghodss/yaml => github.com/ghodss/yaml v1.0.0 + github.com/gliderlabs/ssh => github.com/gliderlabs/ssh v0.1.1 github.com/go-acme/lego => github.com/go-acme/lego v2.5.0+incompatible github.com/go-bindata/go-bindata => github.com/go-bindata/go-bindata v3.1.1+incompatible + github.com/go-critic/go-critic => github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540 + github.com/go-kit/kit => github.com/go-kit/kit 
v0.8.0 + github.com/go-lintpack/lintpack => github.com/go-lintpack/lintpack v0.5.2 + github.com/go-logfmt/logfmt => github.com/go-logfmt/logfmt v0.3.0 github.com/go-logr/logr => github.com/go-logr/logr v0.1.0 - github.com/go-openapi/analysis => github.com/go-openapi/analysis v0.19.2 + github.com/go-ole/go-ole => github.com/go-ole/go-ole v1.2.1 + github.com/go-openapi/analysis => github.com/go-openapi/analysis v0.19.5 github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.2 - github.com/go-openapi/jsonpointer => github.com/go-openapi/jsonpointer v0.19.2 - github.com/go-openapi/jsonreference => github.com/go-openapi/jsonreference v0.19.2 - github.com/go-openapi/loads => github.com/go-openapi/loads v0.19.2 - github.com/go-openapi/runtime => github.com/go-openapi/runtime v0.19.0 - github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.2 - github.com/go-openapi/strfmt => github.com/go-openapi/strfmt v0.19.0 - github.com/go-openapi/swag => github.com/go-openapi/swag v0.19.2 - github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.2 + github.com/go-openapi/jsonpointer => github.com/go-openapi/jsonpointer v0.19.3 + github.com/go-openapi/jsonreference => github.com/go-openapi/jsonreference v0.19.3 + github.com/go-openapi/loads => github.com/go-openapi/loads v0.19.4 + github.com/go-openapi/runtime => github.com/go-openapi/runtime v0.19.4 + github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.3 + github.com/go-openapi/strfmt => github.com/go-openapi/strfmt v0.19.3 + github.com/go-openapi/swag => github.com/go-openapi/swag v0.19.5 + github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.5 github.com/go-ozzo/ozzo-validation => github.com/go-ozzo/ozzo-validation v3.5.0+incompatible - github.com/godbus/dbus => github.com/godbus/dbus v4.1.0+incompatible + github.com/go-stack/stack => github.com/go-stack/stack v1.8.0 + github.com/go-toolsmith/astcast => github.com/go-toolsmith/astcast v1.0.0 + github.com/go-toolsmith/astcopy => github.com/go-toolsmith/astcopy v1.0.0 + github.com/go-toolsmith/astequal => github.com/go-toolsmith/astequal v1.0.0 + github.com/go-toolsmith/astfmt => github.com/go-toolsmith/astfmt v1.0.0 + github.com/go-toolsmith/astinfo => github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21 + github.com/go-toolsmith/astp => github.com/go-toolsmith/astp v1.0.0 + github.com/go-toolsmith/pkgload => github.com/go-toolsmith/pkgload v1.0.0 + github.com/go-toolsmith/strparse => github.com/go-toolsmith/strparse v1.0.0 + github.com/go-toolsmith/typep => github.com/go-toolsmith/typep v1.0.0 + github.com/gobwas/glob => github.com/gobwas/glob v0.2.3 + github.com/godbus/dbus => github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f github.com/gogo/protobuf => github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/glog => github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/groupcache => github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 github.com/golang/mock => github.com/golang/mock v1.2.0 - github.com/golang/protobuf => github.com/golang/protobuf v1.3.1 + github.com/golang/protobuf => github.com/golang/protobuf v1.3.2 + github.com/golangci/check => github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 + github.com/golangci/dupl => github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a + github.com/golangci/errcheck => github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 + github.com/golangci/go-misc => github.com/golangci/go-misc 
v0.0.0-20180628070357-927a3d87b613 + github.com/golangci/go-tools => github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c + github.com/golangci/goconst => github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 + github.com/golangci/gocyclo => github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee + github.com/golangci/gofmt => github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98 + github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 + github.com/golangci/gosec => github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547 + github.com/golangci/ineffassign => github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc + github.com/golangci/lint-1 => github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217 + github.com/golangci/maligned => github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca + github.com/golangci/misspell => github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 + github.com/golangci/prealloc => github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 + github.com/golangci/revgrep => github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 + github.com/golangci/unconvert => github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 github.com/golangplus/bytes => github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450 github.com/golangplus/fmt => github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995 github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e - github.com/google/btree => github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c + github.com/google/btree => github.com/google/btree v1.0.0 github.com/google/cadvisor => github.com/google/cadvisor v0.34.0 - github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21 github.com/google/go-cmp => github.com/google/go-cmp v0.3.0 + github.com/google/go-github => github.com/google/go-github v17.0.0+incompatible + github.com/google/go-querystring => github.com/google/go-querystring v1.0.0 github.com/google/gofuzz => github.com/google/gofuzz v1.0.0 github.com/google/martian => github.com/google/martian v2.1.0+incompatible github.com/google/pprof => github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 @@ -158,31 +201,34 @@ replace ( github.com/gorilla/context => github.com/gorilla/context v1.1.1 github.com/gorilla/mux => github.com/gorilla/mux v1.7.0 github.com/gorilla/websocket => github.com/gorilla/websocket v1.4.0 - github.com/gregjones/httpcache => github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 - github.com/grpc-ecosystem/go-grpc-middleware => github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79 + github.com/gostaticanalysis/analysisutil => github.com/gostaticanalysis/analysisutil v0.0.3 + github.com/gregjones/httpcache => github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 + github.com/grpc-ecosystem/go-grpc-middleware => github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/grpc-ecosystem/go-grpc-prometheus => github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.3.0 + github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.9.5 github.com/hashicorp/go-syslog => github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/golang-lru => github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/hcl => 
github.com/hashicorp/hcl v1.0.0 - github.com/heketi/heketi => github.com/heketi/heketi v9.0.0+incompatible - github.com/heketi/rest => github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413 + github.com/heketi/heketi => github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible github.com/heketi/tests => github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 - github.com/heketi/utils => github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64 github.com/hpcloud/tail => github.com/hpcloud/tail v1.0.0 github.com/imdario/mergo => github.com/imdario/mergo v0.3.5 github.com/inconshreveable/mousetrap => github.com/inconshreveable/mousetrap v1.0.0 + github.com/jellevandenhooff/dkim => github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1 github.com/jimstudt/http-authentication => github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a github.com/jmespath/go-jmespath => github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/jonboulle/clockwork => github.com/jonboulle/clockwork v0.1.0 - github.com/json-iterator/go => github.com/json-iterator/go v1.1.7 + github.com/json-iterator/go => github.com/json-iterator/go v1.1.8 github.com/jstemmer/go-junit-report => github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 github.com/jtolds/gls => github.com/jtolds/gls v4.20.0+incompatible + github.com/julienschmidt/httprouter => github.com/julienschmidt/httprouter v1.2.0 github.com/karrick/godirwalk => github.com/karrick/godirwalk v1.7.5 github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.2.0 github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0 + github.com/klauspost/compress => github.com/klauspost/compress v1.4.1 github.com/klauspost/cpuid => github.com/klauspost/cpuid v1.2.0 github.com/konsorten/go-windows-terminal-sequences => github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/kr/logfmt => github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 github.com/kr/pretty => github.com/kr/pretty v0.1.0 github.com/kr/pty => github.com/kr/pty v1.1.5 github.com/kr/text => github.com/kr/text v0.1.0 @@ -190,168 +236,208 @@ replace ( github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.0 github.com/liggitt/tabwriter => github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de github.com/lithammer/dedent => github.com/lithammer/dedent v1.1.0 + github.com/logrusorgru/aurora => github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e github.com/lpabon/godbc => github.com/lpabon/godbc v0.1.1 github.com/lucas-clemente/aes12 => github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f github.com/lucas-clemente/quic-clients => github.com/lucas-clemente/quic-clients v0.1.0 github.com/lucas-clemente/quic-go => github.com/lucas-clemente/quic-go v0.10.2 github.com/lucas-clemente/quic-go-certificates => github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced github.com/magiconair/properties => github.com/magiconair/properties v1.8.1 - github.com/mailru/easyjson => github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 + github.com/mailru/easyjson => github.com/mailru/easyjson v0.7.0 github.com/marten-seemann/qtls => github.com/marten-seemann/qtls v0.2.3 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.0.9 - github.com/mattn/go-isatty => github.com/mattn/go-isatty v0.0.3 + github.com/mattn/go-isatty => github.com/mattn/go-isatty v0.0.9 + github.com/mattn/go-runewidth => 
github.com/mattn/go-runewidth v0.0.2 github.com/mattn/go-shellwords => github.com/mattn/go-shellwords v1.0.5 + github.com/mattn/goveralls => github.com/mattn/goveralls v0.0.2 github.com/matttproud/golang_protobuf_extensions => github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mesos/mesos-go => github.com/mesos/mesos-go v0.0.9 github.com/mholt/certmagic => github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 github.com/miekg/dns => github.com/miekg/dns v1.1.4 - github.com/mindprince/gonvml => github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2 + github.com/mindprince/gonvml => github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 github.com/mistifyio/go-zfs => github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/mitchellh/go-homedir => github.com/mitchellh/go-homedir v1.1.0 + github.com/mitchellh/go-ps => github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 github.com/mitchellh/go-wordwrap => github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/mapstructure => github.com/mitchellh/mapstructure v1.1.2 github.com/modern-go/concurrent => github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/reflect2 => github.com/modern-go/reflect2 v1.0.1 github.com/mohae/deepcopy => github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb github.com/morikuni/aec => github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c - github.com/mrunalp/fileutils => github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058 - github.com/munnerz/goautoneg => github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d + github.com/mozilla/tls-observatory => github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40 + github.com/mrunalp/fileutils => github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 + github.com/munnerz/goautoneg => github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mvdan/xurls => github.com/mvdan/xurls v1.1.0 + github.com/mwitkow/go-conntrack => github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 github.com/mxk/go-flowrate => github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/naoina/go-stringutil => github.com/naoina/go-stringutil v0.1.0 github.com/naoina/toml => github.com/naoina/toml v0.1.1 - github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega => github.com/onsi/gomega v1.5.0 + github.com/nbutton23/zxcvbn-go => github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663 + github.com/olekukonko/tablewriter => github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 + github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.10.1 + github.com/onsi/gomega => github.com/onsi/gomega v1.7.0 github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1 - github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830 + github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9 github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.0 - github.com/opencontainers/selinux => github.com/opencontainers/selinux v1.2.2 - github.com/pborman/uuid => github.com/pborman/uuid v1.2.0 + github.com/opencontainers/selinux => github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0 github.com/peterbourgon/diskv 
=> github.com/peterbourgon/diskv v2.0.1+incompatible - github.com/pkg/errors => github.com/pkg/errors v0.8.0 + github.com/pkg/errors => github.com/pkg/errors v0.8.1 github.com/pmezard/go-difflib => github.com/pmezard/go-difflib v1.0.0 github.com/pquerna/cachecontrol => github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 github.com/pquerna/ffjson => github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20 - github.com/prometheus/client_golang => github.com/prometheus/client_golang v0.9.2 - github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 - github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a + github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/common => github.com/prometheus/common v0.4.1 + github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.2 + github.com/quasilyte/go-consistent => github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c github.com/quobyte/api => github.com/quobyte/api v0.1.2 github.com/remyoudompheng/bigfft => github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 github.com/robfig/cron => github.com/robfig/cron v1.1.0 + github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.3.0 github.com/rubiojr/go-vhd => github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c github.com/russross/blackfriday => github.com/russross/blackfriday v1.5.2 + github.com/ryanuber/go-glob => github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 github.com/seccomp/libseccomp-golang => github.com/seccomp/libseccomp-golang v0.9.1 + github.com/sergi/go-diff => github.com/sergi/go-diff v1.0.0 + github.com/shirou/gopsutil => github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7 + github.com/shirou/w32 => github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 + github.com/shurcooL/go => github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e + github.com/shurcooL/go-goon => github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2 github.com/smartystreets/assertions => github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d github.com/smartystreets/goconvey => github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a - github.com/soheilhy/cmux => github.com/soheilhy/cmux v0.1.3 + github.com/soheilhy/cmux => github.com/soheilhy/cmux v0.1.4 + github.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 github.com/spf13/afero => github.com/spf13/afero v1.2.2 github.com/spf13/cast => github.com/spf13/cast v1.3.0 github.com/spf13/cobra => github.com/spf13/cobra v0.0.5 github.com/spf13/jwalterweatherman => github.com/spf13/jwalterweatherman v1.1.0 - github.com/spf13/pflag => github.com/spf13/pflag v1.0.3 + github.com/spf13/pflag => github.com/spf13/pflag v1.0.5 github.com/spf13/viper => github.com/spf13/viper v1.3.2 github.com/storageos/go-api => github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc github.com/stretchr/objx => github.com/stretchr/objx v0.2.0 - 
github.com/stretchr/testify => github.com/stretchr/testify v1.3.0 - github.com/syndtr/gocapability => github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4 + github.com/stretchr/testify => github.com/stretchr/testify v1.4.0 + github.com/syndtr/gocapability => github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 + github.com/tarm/serial => github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 github.com/thecodeteam/goscaleio => github.com/thecodeteam/goscaleio v0.1.0 + github.com/tidwall/pretty => github.com/tidwall/pretty v1.0.0 + github.com/timakin/bodyclose => github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec github.com/tmc/grpc-websocket-proxy => github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 github.com/ugorji/go/codec => github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 + github.com/ultraware/funlen => github.com/ultraware/funlen v0.0.2 + github.com/urfave/cli => github.com/urfave/cli v1.20.0 github.com/urfave/negroni => github.com/urfave/negroni v1.0.0 - github.com/vishvananda/netlink => github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e + github.com/valyala/bytebufferpool => github.com/valyala/bytebufferpool v1.0.0 + github.com/valyala/fasthttp => github.com/valyala/fasthttp v1.2.0 + github.com/valyala/quicktemplate => github.com/valyala/quicktemplate v1.1.1 + github.com/valyala/tcplisten => github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a + github.com/vektah/gqlparser => github.com/vektah/gqlparser v1.1.2 + github.com/vishvananda/netlink => github.com/vishvananda/netlink v1.0.0 github.com/vishvananda/netns => github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 - github.com/vmware/govmomi => github.com/vmware/govmomi v0.20.1 - github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 + github.com/vmware/govmomi => github.com/vmware/govmomi v0.20.3 + github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 github.com/xlab/handysort => github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1 github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 + go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.3 + go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 + go.mongodb.org/mongo-driver => go.mongodb.org/mongo-driver v1.1.2 go.opencensus.io => go.opencensus.io v0.21.0 - go.uber.org/atomic => go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 - go.uber.org/multierr => go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df - go.uber.org/zap => go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 - golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 + go.uber.org/atomic => go.uber.org/atomic v1.3.2 + go.uber.org/multierr => go.uber.org/multierr v1.1.0 + go.uber.org/zap => go.uber.org/zap v1.10.0 + go4.org => go4.org v0.0.0-20180809161055-417644f6feb5 + golang.org/x/build => golang.org/x/build v0.0.0-20190927031335-2835ba2e683f + golang.org/x/crypto => golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 golang.org/x/exp => golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 golang.org/x/image => golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 - golang.org/x/lint => golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 + golang.org/x/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 golang.org/x/mobile => golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6 golang.org/x/mod => golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e - golang.org/x/net => golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a - golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 - golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db - golang.org/x/time => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d - golang.org/x/tools => golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac + golang.org/x/net => golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 + golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/perf => golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852 + golang.org/x/sync => golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 + golang.org/x/text => golang.org/x/text v0.3.2 + golang.org/x/time => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 + golang.org/x/xerrors => golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 gonum.org/v1/gonum => gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 gonum.org/v1/netlib => gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e google.golang.org/api => google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 google.golang.org/appengine => google.golang.org/appengine v1.5.0 google.golang.org/genproto => google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 - google.golang.org/grpc => google.golang.org/grpc v1.23.0 + google.golang.org/grpc => google.golang.org/grpc v1.23.1 gopkg.in/airbrake/gobrake.v2 => gopkg.in/airbrake/gobrake.v2 v2.0.9 + gopkg.in/alecthomas/kingpin.v2 => gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 + gopkg.in/cheggaaa/pb.v1 => gopkg.in/cheggaaa/pb.v1 v1.0.25 gopkg.in/errgo.v2 => gopkg.in/errgo.v2 v2.1.0 gopkg.in/fsnotify.v1 => gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/gcfg.v1 => gopkg.in/gcfg.v1 v1.2.0 gopkg.in/gemnasium/logrus-airbrake-hook.v2 => gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 - gopkg.in/inf.v0 => gopkg.in/inf.v0 v0.9.0 + gopkg.in/inf.v0 => gopkg.in/inf.v0 v0.9.1 gopkg.in/mcuadros/go-syslog.v2 => gopkg.in/mcuadros/go-syslog.v2 v2.2.1 gopkg.in/natefinch/lumberjack.v2 => gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/resty.v1 => gopkg.in/resty.v1 v1.12.0 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/tomb.v1 => gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/warnings.v0 => gopkg.in/warnings.v0 v0.1.1 - gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.4 gotest.tools => gotest.tools v2.2.0+incompatible gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5 + grpc.go4.org => grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2 - k8s.io/api => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/api - k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apiextensions-apiserver - k8s.io/apimachinery => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apimachinery - k8s.io/apiserver => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apiserver - k8s.io/cli-runtime => 
/tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cli-runtime - k8s.io/client-go => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/client-go - k8s.io/cloud-provider => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cloud-provider - k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cluster-bootstrap - k8s.io/code-generator => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/code-generator - k8s.io/component-base => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/component-base - k8s.io/cri-api => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cri-api - k8s.io/csi-translation-lib => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/csi-translation-lib + k8s.io/api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/api + k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiextensions-apiserver + k8s.io/apimachinery => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apimachinery + k8s.io/apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiserver + k8s.io/cli-runtime => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cli-runtime + k8s.io/client-go => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/client-go + k8s.io/cloud-provider => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cloud-provider + k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cluster-bootstrap + k8s.io/code-generator => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/code-generator + k8s.io/component-base => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/component-base + k8s.io/cri-api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cri-api + k8s.io/csi-translation-lib => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/csi-translation-lib k8s.io/gengo => k8s.io/gengo v0.0.0-20190822140433-26a664648505 k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 - k8s.io/klog => k8s.io/klog v0.4.0 - k8s.io/kube-aggregator => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kube-aggregator - k8s.io/kube-controller-manager => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kube-controller-manager - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf - k8s.io/kube-proxy => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kube-proxy - k8s.io/kube-scheduler => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kube-scheduler - k8s.io/kubectl => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kubectl - k8s.io/kubelet => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kubelet - k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/legacy-cloud-providers - k8s.io/metrics => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/metrics - k8s.io/node-api => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/node-api - k8s.io/repo-infra => k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3 - k8s.io/sample-apiserver => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/sample-apiserver - k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/sample-cli-plugin - k8s.io/sample-controller => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/sample-controller - k8s.io/utils => k8s.io/utils v0.0.0-20190801114015-581e00157fb1 + k8s.io/klog => k8s.io/klog v1.0.0 + k8s.io/kube-aggregator => 
/tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-aggregator + k8s.io/kube-controller-manager => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-controller-manager + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a + k8s.io/kube-proxy => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-proxy + k8s.io/kube-scheduler => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-scheduler + k8s.io/kubectl => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubectl + k8s.io/kubelet => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubelet + k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/legacy-cloud-providers + k8s.io/metrics => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/metrics + k8s.io/node-api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/node-api + k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1 + k8s.io/sample-apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-apiserver + k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-cli-plugin + k8s.io/sample-controller => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-controller + k8s.io/system-validators => k8s.io/system-validators v1.0.4 + k8s.io/utils => k8s.io/utils v0.0.0-20191114184206-e782cd3c129f modernc.org/cc => modernc.org/cc v1.0.0 modernc.org/golex => modernc.org/golex v1.0.0 modernc.org/mathutil => modernc.org/mathutil v1.0.0 modernc.org/strutil => modernc.org/strutil v1.0.0 modernc.org/xc => modernc.org/xc v1.0.0 + mvdan.cc/interfacer => mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed + mvdan.cc/lint => mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b + mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible - sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca + sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.1.0 + sourcegraph.com/sqs/pbtypes => sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc ) replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace github.com/google/go-querystring => github.com/google/go-querystring v1.0.0 - -replace k8s.io/kubernetes => /tmp/ca-update-vendor.oU6q/kubernetes +replace k8s.io/kubernetes => /tmp/ca-update-vendor.oDH2/kubernetes diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index 98cba7315c1..77166ae6e97 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -1,8 +1,8 @@ bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -github.com/Azure/azure-sdk-for-go v32.5.0+incompatible h1:Hn/DsObfmw0M7dMGS/c0MlVrJuGFzHzOpBWL89acR68= -github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible h1:PkmdmQUmeSdQQ5258f4SyCf2Zcz0w67qztEg37cOR7U= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= @@ -30,16 +30,23 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d h1:u64+IetywsPQ0gJ/4cXBJ/KiXV9xTKRMoaCOzW9PI3g= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0 h1:FqK94z34ly8Baa6K+G8Mmza9rYWTKOJk+yckIBB5qVk= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -49,15 +56,18 @@ github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7 h1:irR1cO6 github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= github.com/aws/aws-sdk-go v1.23.18 h1:ADU/y1EO8yPzUJJYjcvJ0V9/suezxPh0u6hb5bSYIGQ= github.com/aws/aws-sdk-go v1.23.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod 
h1:uHBSeeATKpVazAACZBDPL/Nk/UhQDDsJWDlqYJo8/Us= -github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= +github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= @@ -66,14 +76,14 @@ github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nW github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf h1:eOyFuj3h/Vj5e4voOM16NNrHsUR3jhD0duh76LHMj6Y= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 h1:eIHD9GNM3Hp7kcRW5mvcz7WTR3ETeoYYKwpgA04kaXE= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codegangsta/negroni v1.0.0 h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs= -github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/container-storage-interface/spec v1.2.0 
h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa h1:GnRy2maqb8vcJhYRN5L+5WyYNKfUG4otiz2zxE182ng= github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/containerd v1.0.2 h1:AcqeeOunmUuo2CvPPtHMhWn7mi54clu+j9yqXKxGFtk= @@ -82,22 +92,20 @@ github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 h1:14r0i3IeJj6z github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= -github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coredns/corefile-migration v1.0.4/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/rkt v1.30.0 h1:Kkt6sYeEGKxA3Y7SCrY+nHoXkWed6Jr2BBY42GqMymM= +github.com/coreos/rkt v1.30.0 h1:dFPDEVnztemjpNGzxWdJivfwNbUz2VAlYccttRb6H6A= github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -109,16 +117,17 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution 
v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:sLjcXL0ozEc1CVv4OhvrLFbieIL8OxgtFTMykm8idvo= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789 h1:8rOK787QQFFZJcOLXPiKKidY/ie2OQpblM5gEAaenPs= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 h1:8GFZB1KesbMy2X2zTiJyAuwCow+U1GT0ueD42p59y4k= +github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -130,34 +139,50 @@ github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod 
h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= -github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM= +github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -166,19 +191,35 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieF github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= 
+github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.34.0 h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw= github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= @@ -201,48 +242,49 @@ github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79 h1:lR9ssWAqp9qL0bALxqEEkuudiP1eweOdv9jsRK3e7lE= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod 
h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0 h1:HJtP6RRwj2EpPCD/mhAWzSvLL/dFTdPm1UrWwanoFos= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/heketi/heketi v9.0.0+incompatible h1:B2ACAbYsCHkJXKozYVV7p2j+eEy/zNlLsicihMWCk30= -github.com/heketi/heketi v9.0.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413 h1:nGZBOxRgSMbqjm2/FYDtO6BU4a+hfR7Om9VGQ9tbbSc= -github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413/go.mod h1:BeS3M108VzVlmAue3lv2WcGuPAX94/KN63MUURzbYSI= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible h1:ysqc8k973k1lLJ4BOOHAkx14K2nt4cLjsIm+hwWDZDE= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6VxG31lg0d6XOURNA09BTtM4fY= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64 h1:dk3GEa55HcRVIyCeNQmwwwH3kIXnqJPNseKOkDD+7uQ= -github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.7.5 h1:JQFiMR65pT543bkWP46+k194gS999qo/OYccos9cOXg= github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -254,6 +296,7 @@ github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1 h1:ilqjArN1UOENJJdM34I2YHKmF/B0gGq4VLoSGy9iAao= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= @@ -261,13 +304,15 @@ github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mesos/mesos-go v0.0.9 h1:w8V5sOEnxzHZ2kAOy273v/HgbolyI6XI+qe5jx5u+Y0= @@ -275,11 +320,12 @@ github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.4 h1:rCMZsU2ScVSYcAsOXgmC6+AKOK+6pmQTOcw03nfwYV0= github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2 h1:v3dy+FJr7gS7nLgYG7YjX/pmUWuFdudcpnoRNHt2heo= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -291,130 +337,160 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQih github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058 h1:A4y2IxU1GcIzlcmUlQ6yr/mrvYZhqo+HakAPwgwaa6s= -github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d 
h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830 h1:yvQ/2Pupw60ON8TYEIGGTAI77yZsWYkiOeHFZWkwlCk= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg= github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= -github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/pborman/uuid v1.2.0 
h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 h1:B8hYj3NxHmjsC3T+tnlZ1UhInqUgnyF1zlGPmzNg2Qk= +github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20 h1:7sBb9iOkeq+O7AXlVoH/8zpIcRXX523zMkKKspHjjx8= github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quobyte/api v0.1.2 h1:lPHLsuvtjFyk8WhC4uHoHRkScijIHcffTWBBP+YpzYo= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron 
v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c h1:ht7N4d/B7Ezf58nvMNVF3OlvDlz9pp+WHVcRNS0nink= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.3 h1:09wy7WZk4AqO03yH85Ex1X+Uo3vDsil3Fa9AgF8Emss= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc h1:n+WYaU0kQ6WIiuEyWSgbXqkBx16irO69kYCtwVYoO5s= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4 h1:w58e6FAOMd+rUgOfhaBb+ZVOQIOfUkpv5AAQVmf6hsI= -github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thecodeteam/goscaleio v0.1.0 h1:SB5tO98lawC+UK8ds/U2jyfOCH7GTcFztcF5x9gbut4= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 h1:J9gO8RJCAFlln1jsvRba/CWVUnMHwObklfxxjErl1uk= 
github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vmware/govmomi v0.20.1 h1:7b/SeTUB3tER8ZLGLLLH3xcnB2xeuLULXmfPFqPSRZA= -github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrKP0VuBPijjk3TfX6Y6acFNg= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp 
v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 h1:ppLucX0K/60T3t6LPZQzTOkt5PytkEbQLIaSteq+TpE= @@ -423,50 +499,59 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gotest.tools v2.2.0+incompatible h1:y0IMTfclpMdsdIbr6uwmJn5/WZ7vFuObxDMdrylFM3A= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3/go.mod h1:+G1xBfZDfVFsm1Tj/HNCvg4QqWx8rJ2Fxpqr1rqp/gQ= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= +k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca h1:6dsH6AYQWbyZmtttJNe8Gq1cXOeS1BdV3eW37zHilAQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sqs/pbtypes 
v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go index 008185e1596..d8e9208e34d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go @@ -465,7 +465,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp } // ListBySubscription lists all availability sets in a subscription. -func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (result AvailabilitySetListResultPage, err error) { +// Parameters: +// expand - the expand expression to apply to the operation. +func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, expand string) (result AvailabilitySetListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -477,7 +479,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re }() } result.fn = client.listBySubscriptionNextResults - req, err := client.ListBySubscriptionPreparer(ctx) + req, err := client.ListBySubscriptionPreparer(ctx, expand) if err != nil { err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", nil, "Failure preparing request") return @@ -499,7 +501,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re } // ListBySubscriptionPreparer prepares the ListBySubscription request. -func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { +func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -508,6 +510,9 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -559,7 +564,7 @@ func (client AvailabilitySetsClient) listBySubscriptionNextResults(ctx context.C } // ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context) (result AvailabilitySetListResultIterator, err error) { +func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context, expand string) (result AvailabilitySetListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -570,7 +575,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Cont tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListBySubscription(ctx) + result.page, err = client.ListBySubscription(ctx, expand) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go index 5f691d90cfa..8e8a1ec5ffe 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go @@ -1244,6 +1244,17 @@ type AutomaticOSUpgradeProperties struct { AutomaticOSUpgradeSupported *bool `json:"automaticOSUpgradeSupported,omitempty"` } +// AutomaticRepairsPolicy specifies the configuration parameters for automatic repairs on the virtual +// machine scale set. +type AutomaticRepairsPolicy struct { + // Enabled - Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false. + Enabled *bool `json:"enabled,omitempty"` + // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The default value is 5 minutes (PT5M). + GracePeriod *string `json:"gracePeriod,omitempty"` + // MaxInstanceRepairsPercent - The percentage (capacity of scaleset) of virtual machines that will be simultaneously repaired. The default value is 20%. + MaxInstanceRepairsPercent *int32 `json:"maxInstanceRepairsPercent,omitempty"` +} + // AvailabilitySet specifies information about the availability set that the virtual machine should be // assigned to. Virtual machines specified in the same availability set are allocated to different nodes to // maximize availability. For more information about availability sets, see [Manage the availability of @@ -8709,6 +8720,8 @@ type VirtualMachineScaleSetOSProfile struct { type VirtualMachineScaleSetProperties struct { // UpgradePolicy - The upgrade policy. UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // AutomaticRepairsPolicy - Policy for automatic repairs. + AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` // VirtualMachineProfile - The virtual machine profile. VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. @@ -9528,10 +9541,14 @@ type VirtualMachineScaleSetUpdateOSProfile struct { type VirtualMachineScaleSetUpdateProperties struct { // UpgradePolicy - The upgrade policy. UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // AutomaticRepairsPolicy - Policy for automatic repairs. 
+ AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` // VirtualMachineProfile - The virtual machine profile. VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. Overprovision *bool `json:"overprovision,omitempty"` + // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs. + DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go index 715a2683736..0a51c651aeb 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go @@ -1270,7 +1270,7 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu return } -// Redeploy the operation to redeploy a virtual machine. +// Redeploy shuts down the virtual machine, moves it to a new node, and powers it back on. // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go index 78fb28af43d..0d1b297012c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go @@ -1245,7 +1245,8 @@ func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Respons return } -// Redeploy redeploy one or more virtual machines in a VM scale set. +// Redeploy shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers +// them back on. // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. 
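The 2018-10-01 hunks above are caller-visible: VirtualMachineScaleSetProperties gains an AutomaticRepairsPolicy field and AvailabilitySetsClient.ListBySubscription now requires an expand argument. The following is a minimal sketch, not part of the vendored diff, of how calling code might adapt after this vendor bump; the function name demo, the grace-period value, and the empty expand expression are illustrative assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// demo exercises the two additions shown in the hunks above: the new
// AutomaticRepairsPolicy field and the extra expand parameter.
func demo(ctx context.Context, client compute.AvailabilitySetsClient) error {
	// Populate the new optional field on scale set properties.
	props := compute.VirtualMachineScaleSetProperties{
		AutomaticRepairsPolicy: &compute.AutomaticRepairsPolicy{
			Enabled:                   to.BoolPtr(true),
			GracePeriod:               to.StringPtr("PT30M"), // ISO 8601 duration; the SDK default is PT5M
			MaxInstanceRepairsPercent: to.Int32Ptr(20),
		},
	}
	fmt.Println(*props.AutomaticRepairsPolicy.GracePeriod)

	// ListBySubscription now takes an $expand expression; existing callers
	// pass "" to keep the previous behaviour.
	page, err := client.ListBySubscription(ctx, "")
	if err != nil {
		return err
	}
	for _, as := range page.Values() {
		fmt.Println(to.String(as.Name))
	}
	return nil
}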
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go index 60eeef78643..8c9375d700d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go @@ -641,7 +641,8 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Respo return } -// Redeploy redeploys a virtual machine in a VM scale set. +// Redeploy shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it back +// on. // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go index 8a6bbaa4a19..92945517338 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go @@ -85,7 +85,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -164,7 +164,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -240,7 +240,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +316,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -431,7 +431,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -465,7 +465,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp } // ListBySubscription lists all availability sets in a subscription. -func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (result AvailabilitySetListResultPage, err error) { +// Parameters: +// expand - the expand expression to apply to the operation. 
+func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, expand string) (result AvailabilitySetListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -477,7 +479,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re }() } result.fn = client.listBySubscriptionNextResults - req, err := client.ListBySubscriptionPreparer(ctx) + req, err := client.ListBySubscriptionPreparer(ctx, expand) if err != nil { err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", nil, "Failure preparing request") return @@ -499,15 +501,18 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re } // ListBySubscriptionPreparer prepares the ListBySubscription request. -func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { +func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -559,7 +564,7 @@ func (client AvailabilitySetsClient) listBySubscriptionNextResults(ctx context.C } // ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. -func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context) (result AvailabilitySetListResultIterator, err error) { +func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context, expand string) (result AvailabilitySetListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -570,7 +575,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Cont tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListBySubscription(ctx) + result.page, err = client.ListBySubscription(ctx, expand) return } @@ -619,7 +624,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go index 77ec9da3137..c786ffefc3f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go @@ -98,7 +98,7 @@ func (client DedicatedHostGroupsClient) CreateOrUpdatePreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, 
} @@ -177,7 +177,7 @@ func (client DedicatedHostGroupsClient) DeletePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -253,7 +253,7 @@ func (client DedicatedHostGroupsClient) GetPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -330,7 +330,7 @@ func (client DedicatedHostGroupsClient) ListByResourceGroupPreparer(ctx context. "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -441,7 +441,7 @@ func (client DedicatedHostGroupsClient) ListBySubscriptionPreparer(ctx context.C "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -556,7 +556,7 @@ func (client DedicatedHostGroupsClient) UpdatePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go index 31f8795d6fd..fb6ff59522b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go @@ -94,7 +94,7 @@ func (client DedicatedHostsClient) CreateOrUpdatePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -175,7 +175,7 @@ func (client DedicatedHostsClient) DeletePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -260,7 +260,7 @@ func (client DedicatedHostsClient) GetPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -342,7 +342,7 @@ func (client DedicatedHostsClient) ListByHostGroupPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -453,7 +453,7 @@ func (client DedicatedHostsClient) UpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = 
"2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go new file mode 100644 index 00000000000..cb93acdde87 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go @@ -0,0 +1,599 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiskEncryptionSetsClient is the compute Client +type DiskEncryptionSetsClient struct { + BaseClient +} + +// NewDiskEncryptionSetsClient creates an instance of the DiskEncryptionSetsClient client. +func NewDiskEncryptionSetsClient(subscriptionID string) DiskEncryptionSetsClient { + return NewDiskEncryptionSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiskEncryptionSetsClientWithBaseURI creates an instance of the DiskEncryptionSetsClient client. +func NewDiskEncryptionSetsClientWithBaseURI(baseURI string, subscriptionID string) DiskEncryptionSetsClient { + return DiskEncryptionSetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a disk encryption set +// Parameters: +// resourceGroupName - the name of the resource group. +// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +// diskEncryptionSet - disk encryption set object supplied in the body of the Put disk encryption set +// operation. 
+func (client DiskEncryptionSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet) (result DiskEncryptionSetsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: diskEncryptionSet, + Constraints: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties.ActiveKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties.ActiveKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "diskEncryptionSet.EncryptionSetProperties.ActiveKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.DiskEncryptionSetsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DiskEncryptionSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters), + autorest.WithJSON(diskEncryptionSet), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) CreateOrUpdateSender(req *http.Request) (future DiskEncryptionSetsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always +// closes the http.Response Body. +func (client DiskEncryptionSetsClient) CreateOrUpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk encryption set. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +func (client DiskEncryptionSetsClient) Delete(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSetsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, diskEncryptionSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DiskEncryptionSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) DeleteSender(req *http.Request) (future DiskEncryptionSetsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client DiskEncryptionSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about a disk encryption set. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +func (client DiskEncryptionSetsClient) Get(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, diskEncryptionSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DiskEncryptionSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DiskEncryptionSetsClient) GetResponder(resp *http.Response) (result DiskEncryptionSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disk encryption sets under a subscription. +func (client DiskEncryptionSetsClient) List(ctx context.Context) (result DiskEncryptionSetListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.List") + defer func() { + sc := -1 + if result.desl.Response.Response != nil { + sc = result.desl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.desl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", resp, "Failure sending request") + return + } + + result.desl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DiskEncryptionSetsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DiskEncryptionSetsClient) ListResponder(resp *http.Response) (result DiskEncryptionSetList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client DiskEncryptionSetsClient) listNextResults(ctx context.Context, lastResults DiskEncryptionSetList) (result DiskEncryptionSetList, err error) { + req, err := lastResults.diskEncryptionSetListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DiskEncryptionSetsClient) ListComplete(ctx context.Context) (result DiskEncryptionSetListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists all the disk encryption sets under a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client DiskEncryptionSetsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskEncryptionSetListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.desl.Response.Response != nil { + sc = result.desl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.desl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.desl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client DiskEncryptionSetsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DiskEncryptionSetsClient) ListByResourceGroupResponder(resp *http.Response) (result DiskEncryptionSetList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client DiskEncryptionSetsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskEncryptionSetList) (result DiskEncryptionSetList, err error) { + req, err := lastResults.diskEncryptionSetListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client DiskEncryptionSetsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskEncryptionSetListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// Update updates (patches) a disk encryption set. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +// diskEncryptionSet - disk encryption set object supplied in the body of the Patch disk encryption set +// operation. +func (client DiskEncryptionSetsClient) Update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate) (result DiskEncryptionSetsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client DiskEncryptionSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters), + autorest.WithJSON(diskEncryptionSet), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DiskEncryptionSetsClient) UpdateSender(req *http.Request) (future DiskEncryptionSetsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client DiskEncryptionSetsClient) UpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go index 2dee9d74dad..4bf33668916 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go @@ -95,7 +95,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -177,7 +177,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -261,7 +261,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -341,7 +341,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -422,7 +422,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -535,7 +535,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -645,7 +645,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -724,7 +724,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go new file mode 100644 index 00000000000..c2c651209de --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go @@ -0,0 +1,401 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GalleryApplicationsClient is the compute Client +type GalleryApplicationsClient struct { + BaseClient +} + +// NewGalleryApplicationsClient creates an instance of the GalleryApplicationsClient client. +func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClient { + return NewGalleryApplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleryApplicationsClientWithBaseURI creates an instance of the GalleryApplicationsClient client. +func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient { + return GalleryApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a gallery Application Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be +// created. +// galleryApplicationName - the name of the gallery Application Definition to be created or updated. The +// allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The +// maximum length is 80 characters. +// galleryApplication - parameters supplied to the create or update gallery Application operation. 
+func (client GalleryApplicationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (result GalleryApplicationsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters), + autorest.WithJSON(galleryApplication), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GalleryApplicationsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a gallery Application. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be +// deleted. +// galleryApplicationName - the name of the gallery Application Definition to be deleted. +func (client GalleryApplicationsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future GalleryApplicationsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GalleryApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a gallery Application Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery from which the Application Definitions are to be +// retrieved. +// galleryApplicationName - the name of the gallery Application Definition to be retrieved. 
+func (client GalleryApplicationsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplication, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GalleryApplicationsClient) GetResponder(resp *http.Response) (result GalleryApplication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByGallery list gallery Application Definitions in a gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery from which Application Definitions are to be +// listed. 
+func (client GalleryApplicationsClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery") + defer func() { + sc := -1 + if result.gal.Response.Response != nil { + sc = result.gal.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByGalleryNextResults + req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", nil, "Failure preparing request") + return + } + + resp, err := client.ListByGallerySender(req) + if err != nil { + result.gal.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure sending request") + return + } + + result.gal, err = client.ListByGalleryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure responding to request") + } + + return +} + +// ListByGalleryPreparer prepares the ListByGallery request. +func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByGallerySender sends the ListByGallery request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationsClient) ListByGallerySender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByGalleryResponder handles the response to the ListByGallery request. The method always +// closes the http.Response Body. +func (client GalleryApplicationsClient) ListByGalleryResponder(resp *http.Response) (result GalleryApplicationList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByGalleryNextResults retrieves the next set of results, if any. 
+func (client GalleryApplicationsClient) listByGalleryNextResults(ctx context.Context, lastResults GalleryApplicationList) (result GalleryApplicationList, err error) { + req, err := lastResults.galleryApplicationListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByGallerySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByGalleryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required. +func (client GalleryApplicationsClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go new file mode 100644 index 00000000000..40b2dc830e9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go @@ -0,0 +1,428 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GalleryApplicationVersionsClient is the compute Client +type GalleryApplicationVersionsClient struct { + BaseClient +} + +// NewGalleryApplicationVersionsClient creates an instance of the GalleryApplicationVersionsClient client. 
+func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicationVersionsClient { + return NewGalleryApplicationVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleryApplicationVersionsClientWithBaseURI creates an instance of the GalleryApplicationVersionsClient client. +func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient { + return GalleryApplicationVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a gallery Application Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - the name of the gallery Application Definition in which the Application Version is +// to be created. +// galleryApplicationVersionName - the name of the gallery Application Version to be created. Needs to follow +// semantic version name pattern: The allowed characters are digit and period. Digits must be within the range +// of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch> +// galleryApplicationVersion - parameters supplied to the create or update gallery Application Version +// operation. +func (client GalleryApplicationVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (result GalleryApplicationVersionsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: galleryApplicationVersion, + Constraints: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source.FileName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source.MediaLink", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.GalleryApplicationVersionsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the 
CreateOrUpdate request. +func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters), + autorest.WithJSON(galleryApplicationVersion), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationVersionsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GalleryApplicationVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a gallery Application Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - the name of the gallery Application Definition in which the Application Version +// resides. +// galleryApplicationVersionName - the name of the gallery Application Version to be deleted. 
+func (client GalleryApplicationVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (result GalleryApplicationVersionsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) (future GalleryApplicationVersionsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GalleryApplicationVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a gallery Application Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition resides. 
+// galleryApplicationName - the name of the gallery Application Definition in which the Application Version +// resides. +// galleryApplicationVersionName - the name of the gallery Application Version to be retrieved. +// expand - the expand expression to apply on the operation. +func (client GalleryApplicationVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (result GalleryApplicationVersion, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationVersionsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client GalleryApplicationVersionsClient) GetResponder(resp *http.Response) (result GalleryApplicationVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByGalleryApplication list gallery Application Versions in a gallery Application Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - the name of the Shared Application Gallery Application Definition from which the +// Application Versions are to be listed. +func (client GalleryApplicationVersionsClient) ListByGalleryApplication(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication") + defer func() { + sc := -1 + if result.gavl.Response.Response != nil { + sc = result.gavl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByGalleryApplicationNextResults + req, err := client.ListByGalleryApplicationPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", nil, "Failure preparing request") + return + } + + resp, err := client.ListByGalleryApplicationSender(req) + if err != nil { + result.gavl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure sending request") + return + } + + result.gavl, err = client.ListByGalleryApplicationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure responding to request") + } + + return +} + +// ListByGalleryApplicationPreparer prepares the ListByGalleryApplication request. +func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByGalleryApplicationSender sends the ListByGalleryApplication request. The method will close the +// http.Response Body if it receives an error. 
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByGalleryApplicationResponder handles the response to the ListByGalleryApplication request. The method always +// closes the http.Response Body. +func (client GalleryApplicationVersionsClient) ListByGalleryApplicationResponder(resp *http.Response) (result GalleryApplicationVersionList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByGalleryApplicationNextResults retrieves the next set of results, if any. +func (client GalleryApplicationVersionsClient) listByGalleryApplicationNextResults(ctx context.Context, lastResults GalleryApplicationVersionList) (result GalleryApplicationVersionList, err error) { + req, err := lastResults.galleryApplicationVersionListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByGalleryApplicationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByGalleryApplicationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByGalleryApplicationComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByGalleryApplication(ctx, resourceGroupName, galleryName, galleryApplicationName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go index e15083ca448..dfb92b46b0f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go @@ -79,7 +79,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -158,7 +158,7 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -241,7 +241,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -318,7 +318,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -431,7 +431,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -540,7 +540,7 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go index 8fa8c1b0541..145d834d59b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go +++ 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go @@ -85,7 +85,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -170,7 +170,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go index b56b2ae1e59..11e0c0a1bd2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go @@ -270,19 +270,22 @@ func PossibleDiffDiskOptionsValues() []DiffDiskOptions { type DiskCreateOption string const ( - // Attach ... + // Attach Disk will be attached to a VM. Attach DiskCreateOption = "Attach" - // Copy ... + // Copy Create a new disk or snapshot by copying from a disk or snapshot specified by the given + // sourceResourceId. Copy DiskCreateOption = "Copy" - // Empty ... + // Empty Create an empty data disk of a size given by diskSizeGB. Empty DiskCreateOption = "Empty" - // FromImage ... + // FromImage Create a new disk from a platform image specified by the given imageReference. FromImage DiskCreateOption = "FromImage" - // Import ... + // Import Create a disk by importing from a blob specified by a sourceUri in a storage account specified by + // storageAccountId. Import DiskCreateOption = "Import" - // Restore ... + // Restore Create a new disk by copying from a backup recovery point. Restore DiskCreateOption = "Restore" - // Upload ... + // Upload Create a new disk by obtaining a write token and using it to directly upload the contents of the + // disk. Upload DiskCreateOption = "Upload" ) @@ -308,21 +311,34 @@ func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} } +// DiskEncryptionSetIdentityType enumerates the values for disk encryption set identity type. +type DiskEncryptionSetIdentityType string + +const ( + // SystemAssigned ... + SystemAssigned DiskEncryptionSetIdentityType = "SystemAssigned" +) + +// PossibleDiskEncryptionSetIdentityTypeValues returns an array of possible values for the DiskEncryptionSetIdentityType const type. +func PossibleDiskEncryptionSetIdentityTypeValues() []DiskEncryptionSetIdentityType { + return []DiskEncryptionSetIdentityType{SystemAssigned} +} + // DiskState enumerates the values for disk state. type DiskState string const ( - // ActiveSAS ... + // ActiveSAS The disk currently has an Active SAS Uri associated with it. ActiveSAS DiskState = "ActiveSAS" - // ActiveUpload ... + // ActiveUpload A disk is created for upload and a write token has been issued for uploading to it. ActiveUpload DiskState = "ActiveUpload" - // Attached ... 
+ // Attached The disk is currently mounted to a running VM. Attached DiskState = "Attached" - // ReadyToUpload ... + // ReadyToUpload A disk is ready to be created by upload by requesting a write token. ReadyToUpload DiskState = "ReadyToUpload" - // Reserved ... + // Reserved The disk is mounted to a stopped-deallocated VM Reserved DiskState = "Reserved" - // Unattached ... + // Unattached The disk is not being used and can be attached to a VM. Unattached DiskState = "Unattached" ) @@ -335,13 +351,17 @@ func PossibleDiskStateValues() []DiskState { type DiskStorageAccountTypes string const ( - // PremiumLRS ... + // PremiumLRS Premium SSD locally redundant storage. Best for production and performance sensitive + // workloads. PremiumLRS DiskStorageAccountTypes = "Premium_LRS" - // StandardLRS ... + // StandardLRS Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent + // access. StandardLRS DiskStorageAccountTypes = "Standard_LRS" - // StandardSSDLRS ... + // StandardSSDLRS Standard SSD locally redundant storage. Best for web servers, lightly used enterprise + // applications and dev/test. StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS" - // UltraSSDLRS ... + // UltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top + // tier databases (for example, SQL, Oracle), and other transaction-heavy workloads. UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS" ) @@ -350,6 +370,22 @@ func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes { return []DiskStorageAccountTypes{PremiumLRS, StandardLRS, StandardSSDLRS, UltraSSDLRS} } +// EncryptionType enumerates the values for encryption type. +type EncryptionType string + +const ( + // EncryptionAtRestWithCustomerKey Disk is encrypted with Customer managed key at rest. + EncryptionAtRestWithCustomerKey EncryptionType = "EncryptionAtRestWithCustomerKey" + // EncryptionAtRestWithPlatformKey Disk is encrypted with XStore managed key at rest. It is the default + // encryption type. + EncryptionAtRestWithPlatformKey EncryptionType = "EncryptionAtRestWithPlatformKey" +) + +// PossibleEncryptionTypeValues returns an array of possible values for the EncryptionType const type. +func PossibleEncryptionTypeValues() []EncryptionType { + return []EncryptionType{EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformKey} +} + // HostCaching enumerates the values for host caching. type HostCaching string @@ -605,6 +641,29 @@ func PossibleProvisioningState2Values() []ProvisioningState2 { return []ProvisioningState2{ProvisioningState2Creating, ProvisioningState2Deleting, ProvisioningState2Failed, ProvisioningState2Migrating, ProvisioningState2Succeeded, ProvisioningState2Updating} } +// ProvisioningState3 enumerates the values for provisioning state 3. +type ProvisioningState3 string + +const ( + // ProvisioningState3Creating ... + ProvisioningState3Creating ProvisioningState3 = "Creating" + // ProvisioningState3Deleting ... + ProvisioningState3Deleting ProvisioningState3 = "Deleting" + // ProvisioningState3Failed ... + ProvisioningState3Failed ProvisioningState3 = "Failed" + // ProvisioningState3Migrating ... + ProvisioningState3Migrating ProvisioningState3 = "Migrating" + // ProvisioningState3Succeeded ... + ProvisioningState3Succeeded ProvisioningState3 = "Succeeded" + // ProvisioningState3Updating ... 
+ ProvisioningState3Updating ProvisioningState3 = "Updating" +) + +// PossibleProvisioningState3Values returns an array of possible values for the ProvisioningState3 const type. +func PossibleProvisioningState3Values() []ProvisioningState3 { + return []ProvisioningState3{ProvisioningState3Creating, ProvisioningState3Deleting, ProvisioningState3Failed, ProvisioningState3Migrating, ProvisioningState3Succeeded, ProvisioningState3Updating} +} + // ProximityPlacementGroupType enumerates the values for proximity placement group type. type ProximityPlacementGroupType string @@ -771,11 +830,11 @@ func PossibleSettingNamesValues() []SettingNames { type SnapshotStorageAccountTypes string const ( - // SnapshotStorageAccountTypesPremiumLRS ... + // SnapshotStorageAccountTypesPremiumLRS Premium SSD locally redundant storage SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" - // SnapshotStorageAccountTypesStandardLRS ... + // SnapshotStorageAccountTypesStandardLRS Standard HDD locally redundant storage SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS" - // SnapshotStorageAccountTypesStandardZRS ... + // SnapshotStorageAccountTypesStandardZRS Standard zone redundant storage SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" ) @@ -918,6 +977,23 @@ func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { return []VirtualMachinePriorityTypes{Low, Regular} } +// VirtualMachineScaleSetScaleInRules enumerates the values for virtual machine scale set scale in rules. +type VirtualMachineScaleSetScaleInRules string + +const ( + // Default ... + Default VirtualMachineScaleSetScaleInRules = "Default" + // NewestVM ... + NewestVM VirtualMachineScaleSetScaleInRules = "NewestVM" + // OldestVM ... + OldestVM VirtualMachineScaleSetScaleInRules = "OldestVM" +) + +// PossibleVirtualMachineScaleSetScaleInRulesValues returns an array of possible values for the VirtualMachineScaleSetScaleInRules const type. +func PossibleVirtualMachineScaleSetScaleInRulesValues() []VirtualMachineScaleSetScaleInRules { + return []VirtualMachineScaleSetScaleInRules{Default, NewestVM, OldestVM} +} + // VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type. type VirtualMachineScaleSetSkuScaleType string @@ -1348,6 +1424,17 @@ type AutomaticOSUpgradeProperties struct { AutomaticOSUpgradeSupported *bool `json:"automaticOSUpgradeSupported,omitempty"` } +// AutomaticRepairsPolicy specifies the configuration parameters for automatic repairs on the virtual +// machine scale set. +type AutomaticRepairsPolicy struct { + // Enabled - Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false. + Enabled *bool `json:"enabled,omitempty"` + // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The default value is 5 minutes (PT5M). + GracePeriod *string `json:"gracePeriod,omitempty"` + // MaxInstanceRepairsPercent - The percentage (capacity of scaleset) of virtual machines that will be simultaneously repaired. The default value is 20%. 
+ MaxInstanceRepairsPercent *int32 `json:"maxInstanceRepairsPercent,omitempty"` +} + // AvailabilitySet specifies information about the availability set that the virtual machine should be // assigned to. Virtual machines specified in the same availability set are allocated to different nodes to // maximize availability. For more information about availability sets, see [Manage the availability of @@ -1698,6 +1785,13 @@ func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error { return nil } +// BillingProfile specifies the billing related details of a low priority VM or VMSS.
Minimum +// api-version: 2019-03-01. +type BillingProfile struct { + // MaxPrice - Specifies the maximum price you are willing to pay for a low priority VM/VMSS. This price is in US Dollars.
This price will be compared with the current low priority price for the VM size. Also, the prices are compared at the time of create/update of low priority VM/VMSS and the operation will only succeed if the maxPrice is greater than the current low priority price.
The maxPrice will also be used for evicting a low priority VM/VMSS if the current low priority price goes beyond the maxPrice after creation of VM/VMSS.
Possible values are:
- Any decimal value greater than zero. Example: $0.01538
-1 – indicates default price to be up-to on-demand.
You can set the maxPrice to -1 to indicate that the low priority VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you.
Minimum api-version: 2019-03-01. + MaxPrice *float64 `json:"maxPrice,omitempty"` +} + // BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and // Screenshot to diagnose VM status.
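[Illustrative sketch, not part of the vendored patch: how the new BillingProfile and AutomaticRepairsPolicy types are meant to be populated. The placement of these fields on VirtualMachineProperties and VirtualMachineScaleSetProperties is assumed from the corresponding REST API rather than shown in this hunk, and the to.*Ptr helpers come from go-autorest/autorest/to.]

package computesamples

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// lowPriorityVM builds a low priority VM that is never evicted for price
// reasons: maxPrice -1 means "pay up to the current on-demand price".
func lowPriorityVM(location string) compute.VirtualMachine {
	return compute.VirtualMachine{
		Location: to.StringPtr(location),
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			Priority: compute.Low,
			BillingProfile: &compute.BillingProfile{
				MaxPrice: to.Float64Ptr(-1), // also the default when omitted
			},
		},
	}
}

// selfHealingScaleSet builds a scale set with automatic repairs enabled and a
// 30 minute grace period (ISO 8601 duration; the documented default is PT5M).
func selfHealingScaleSet(location string) compute.VirtualMachineScaleSet {
	return compute.VirtualMachineScaleSet{
		Location: to.StringPtr(location),
		VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
			AutomaticRepairsPolicy: &compute.AutomaticRepairsPolicy{
				Enabled:     to.BoolPtr(true),
				GracePeriod: to.StringPtr("PT30M"),
			},
		},
	}
}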
You can easily view the output of your console log.
// Azure also enables you to see a screenshot of the VM from the hypervisor. @@ -1718,7 +1812,7 @@ type BootDiagnosticsInstanceView struct { Status *InstanceViewStatus `json:"status,omitempty"` } -// CloudError an error response from the Gallery service. +// CloudError an error response from the Compute service. type CloudError struct { Error *APIError `json:"error,omitempty"` } @@ -2142,6 +2236,10 @@ type CreationData struct { SourceURI *string `json:"sourceUri,omitempty"` // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. SourceResourceID *string `json:"sourceResourceId,omitempty"` + // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource. + SourceUniqueID *string `json:"sourceUniqueId,omitempty"` + // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). + UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"` } // DataDisk describes a data disk. @@ -2166,6 +2264,10 @@ type DataDisk struct { ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` // ToBeDetached - Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset ToBeDetached *bool `json:"toBeDetached,omitempty"` + // DiskIOPSReadWrite - READ-ONLY; Specifies the Read-Write IOPS for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` } // DataDiskImage contains the data disk images information. @@ -3108,46 +3210,139 @@ func (d *Disk) UnmarshalJSON(body []byte) error { return nil } -// DiskEncryptionSettings describes a Encryption Settings for a Disk -type DiskEncryptionSettings struct { - // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. - DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. - KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` - // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. - Enabled *bool `json:"enabled,omitempty"` +// DiskEncryptionSet disk encryption set resource. +type DiskEncryptionSet struct { + autorest.Response `json:"-"` + Identity *EncryptionSetIdentity `json:"identity,omitempty"` + *EncryptionSetProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` } -// DiskInstanceView the instance view of the disk. -type DiskInstanceView struct { - // Name - The disk name. 
- Name *string `json:"name,omitempty"` - // EncryptionSettings - Specifies the encryption settings for the OS Disk.
Minimum api-version: 2015-06-15 - EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +// MarshalJSON is the custom marshaler for DiskEncryptionSet. +func (desVar DiskEncryptionSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if desVar.Identity != nil { + objectMap["identity"] = desVar.Identity + } + if desVar.EncryptionSetProperties != nil { + objectMap["properties"] = desVar.EncryptionSetProperties + } + if desVar.Location != nil { + objectMap["location"] = desVar.Location + } + if desVar.Tags != nil { + objectMap["tags"] = desVar.Tags + } + return json.Marshal(objectMap) } -// DiskList the List Disks operation response. -type DiskList struct { +// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSet struct. +func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "identity": + if v != nil { + var identity EncryptionSetIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + desVar.Identity = &identity + } + case "properties": + if v != nil { + var encryptionSetProperties EncryptionSetProperties + err = json.Unmarshal(*v, &encryptionSetProperties) + if err != nil { + return err + } + desVar.EncryptionSetProperties = &encryptionSetProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + desVar.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + desVar.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + desVar.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + desVar.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + desVar.Tags = tags + } + } + } + + return nil +} + +// DiskEncryptionSetList the List disk encryption set operation response. +type DiskEncryptionSetList struct { autorest.Response `json:"-"` - // Value - A list of disks. - Value *[]Disk `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + // Value - A list of disk encryption sets. + Value *[]DiskEncryptionSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disk encryption sets. Call ListNext() with this to fetch the next page of disk encryption sets. NextLink *string `json:"nextLink,omitempty"` } -// DiskListIterator provides access to a complete listing of Disk values. -type DiskListIterator struct { +// DiskEncryptionSetListIterator provides access to a complete listing of DiskEncryptionSet values. +type DiskEncryptionSetListIterator struct { i int - page DiskListPage + page DiskEncryptionSetListPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. 
-func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *DiskEncryptionSetListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -3172,62 +3367,62 @@ func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *DiskListIterator) Next() error { +func (iter *DiskEncryptionSetListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskListIterator) NotDone() bool { +func (iter DiskEncryptionSetListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DiskListIterator) Response() DiskList { +func (iter DiskEncryptionSetListIterator) Response() DiskEncryptionSetList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DiskListIterator) Value() Disk { +func (iter DiskEncryptionSetListIterator) Value() DiskEncryptionSet { if !iter.page.NotDone() { - return Disk{} + return DiskEncryptionSet{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the DiskListIterator type. -func NewDiskListIterator(page DiskListPage) DiskListIterator { - return DiskListIterator{page: page} +// Creates a new instance of the DiskEncryptionSetListIterator type. +func NewDiskEncryptionSetListIterator(page DiskEncryptionSetListPage) DiskEncryptionSetListIterator { + return DiskEncryptionSetListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (dl DiskList) IsEmpty() bool { - return dl.Value == nil || len(*dl.Value) == 0 +func (desl DiskEncryptionSetList) IsEmpty() bool { + return desl.Value == nil || len(*desl.Value) == 0 } -// diskListPreparer prepares a request to retrieve the next set of results. +// diskEncryptionSetListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) { - if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { +func (desl DiskEncryptionSetList) diskEncryptionSetListPreparer(ctx context.Context) (*http.Request, error) { + if desl.NextLink == nil || len(to.String(desl.NextLink)) < 1 { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dl.NextLink))) + autorest.WithBaseURL(to.String(desl.NextLink))) } -// DiskListPage contains a page of Disk values. -type DiskListPage struct { - fn func(context.Context, DiskList) (DiskList, error) - dl DiskList +// DiskEncryptionSetListPage contains a page of DiskEncryptionSet values. +type DiskEncryptionSetListPage struct { + fn func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error) + desl DiskEncryptionSetList } // NextWithContext advances to the next page of values. 
If there was an error making // the request the page does not advance and the error is returned. -func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { +func (page *DiskEncryptionSetListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -3236,177 +3431,495 @@ func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { tracing.EndSpan(ctx, sc, err) }() } - next, err := page.fn(ctx, page.dl) + next, err := page.fn(ctx, page.desl) if err != nil { return err } - page.dl = next + page.desl = next return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DiskListPage) Next() error { +func (page *DiskEncryptionSetListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskListPage) NotDone() bool { - return !page.dl.IsEmpty() +func (page DiskEncryptionSetListPage) NotDone() bool { + return !page.desl.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DiskListPage) Response() DiskList { - return page.dl +func (page DiskEncryptionSetListPage) Response() DiskEncryptionSetList { + return page.desl } // Values returns the slice of values for the current page or nil if there are no values. -func (page DiskListPage) Values() []Disk { - if page.dl.IsEmpty() { +func (page DiskEncryptionSetListPage) Values() []DiskEncryptionSet { + if page.desl.IsEmpty() { return nil } - return *page.dl.Value + return *page.desl.Value } -// Creates a new instance of the DiskListPage type. -func NewDiskListPage(getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { - return DiskListPage{fn: getNextPage} +// Creates a new instance of the DiskEncryptionSetListPage type. +func NewDiskEncryptionSetListPage(getNextPage func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error)) DiskEncryptionSetListPage { + return DiskEncryptionSetListPage{fn: getNextPage} } -// DiskProperties disk resource properties. -type DiskProperties struct { - // TimeCreated - READ-ONLY; The time when the disk was created. - TimeCreated *date.Time `json:"timeCreated,omitempty"` - // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' - HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` - // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. - CreationData *CreationData `json:"creationData,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. 
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. - EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` - // ProvisioningState - READ-ONLY; The disk provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` - // DiskState - READ-ONLY; The state of the disk. Possible values include: 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', 'ActiveUpload' - DiskState DiskState `json:"diskState,omitempty"` +// DiskEncryptionSetParameters describes the parameter of customer managed disk encryption set resource id +// that can be specified for disk.
NOTE: The disk encryption set resource id can only be specified +// for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details. +type DiskEncryptionSetParameters struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` } -// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksCreateOrUpdateFuture struct { +// DiskEncryptionSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsCreateOrUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { +func (future *DiskEncryptionSetsCreateOrUpdateFuture) Result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") } } return } -// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksDeleteFuture struct { +// DiskEncryptionSetsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
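[For orientation only, not in the patch: a hedged sketch of provisioning one of the new DiskEncryptionSet resources and waiting on its long-running future. The DiskEncryptionSetsClient constructor, its CreateOrUpdate signature and the auth setup are assumed from the usual generated-client conventions; EncryptionSetIdentity, EncryptionSetProperties and KeyVaultAndKeyReference are the models added elsewhere in this models.go diff.]

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := compute.NewDiskEncryptionSetsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	des := compute.DiskEncryptionSet{
		Location: to.StringPtr("westeurope"),
		// SystemAssigned is the only identity type the new enum allows; the
		// resulting identity must be granted access to the key vault key.
		Identity: &compute.EncryptionSetIdentity{Type: compute.SystemAssigned},
		EncryptionSetProperties: &compute.EncryptionSetProperties{
			ActiveKey: &compute.KeyVaultAndKeyReference{
				SourceVault: &compute.SourceVault{ID: to.StringPtr("<key-vault-resource-id>")},
				KeyURL:      to.StringPtr("https://<vault>.vault.azure.net/keys/<key>/<version>"),
			},
		},
	}

	ctx := context.Background()
	future, err := client.CreateOrUpdate(ctx, "<resource-group>", "<encryption-set-name>", des)
	if err != nil {
		log.Fatal(err)
	}
	// Poll the long-running operation, then unwrap the result exactly as
	// DiskEncryptionSetsCreateOrUpdateFuture.Result expects.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	if created, err := future.Result(client); err == nil && created.ID != nil {
		log.Println("created", *created.ID)
	}
}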
-func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { +func (future *DiskEncryptionSetsDeleteFuture) Result(client DiskEncryptionSetsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture") return } ar.Response = future.Response() return } -// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksGrantAccessFuture struct { +// DiskEncryptionSetsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { +func (future *DiskEncryptionSetsUpdateFuture) Result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.UpdateResponder(desVar.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") } } return } -// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. -type DiskSku struct { - // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' - Name DiskStorageAccountTypes `json:"name,omitempty"` - // Tier - READ-ONLY; The sku tier. - Tier *string `json:"tier,omitempty"` +// DiskEncryptionSettings describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. 
+ DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. + Enabled *bool `json:"enabled,omitempty"` } -// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksRevokeAccessFuture struct { - azure.Future +// DiskEncryptionSetUpdate disk encryption set update resource. +type DiskEncryptionSetUpdate struct { + *DiskEncryptionSetUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` } -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return +// MarshalJSON is the custom marshaler for DiskEncryptionSetUpdate. +func (desu DiskEncryptionSetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if desu.DiskEncryptionSetUpdateProperties != nil { + objectMap["properties"] = desu.DiskEncryptionSetUpdateProperties } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") - return + if desu.Tags != nil { + objectMap["tags"] = desu.Tags } - ar.Response = future.Response() - return + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSetUpdate struct. +func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskEncryptionSetUpdateProperties DiskEncryptionSetUpdateProperties + err = json.Unmarshal(*v, &diskEncryptionSetUpdateProperties) + if err != nil { + return err + } + desu.DiskEncryptionSetUpdateProperties = &diskEncryptionSetUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + desu.Tags = tags + } + } + } + + return nil +} + +// DiskEncryptionSetUpdateProperties disk encryption set resource update properties. +type DiskEncryptionSetUpdateProperties struct { + ActiveKey *KeyVaultAndKeyReference `json:"activeKey,omitempty"` +} + +// DiskInstanceView the instance view of the disk. +type DiskInstanceView struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.
Minimum api-version: 2015-06-15 + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskListIterator type. +func NewDiskListIterator(page DiskListPage) DiskListIterator { + return DiskListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) { + if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(context.Context, DiskList) (DiskList, error) + dl DiskList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// Creates a new instance of the DiskListPage type. +func NewDiskListPage(getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { + return DiskListPage{fn: getNextPage} +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - READ-ONLY; The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only. + DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"` + // UniqueID - READ-ONLY; Unique Guid identifying the resource. + UniqueID *string `json:"uniqueId,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // ProvisioningState - READ-ONLY; The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. 
MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` + // DiskState - READ-ONLY; The state of the disk. Possible values include: 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', 'ActiveUpload' + DiskState DiskState `json:"diskState,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. +type DiskSku struct { + // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' + Name DiskStorageAccountTypes `json:"name,omitempty"` + // Tier - READ-ONLY; The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return } // DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. @@ -3416,52 +3929,654 @@ type DisksUpdateFuture struct { // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { +func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskUpdate disk update resource. 
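[Illustrative sketch, not part of the vendored patch: the DiskUpdate model that follows is typically passed to DisksClient.Update, whose future appears above. The Update signature is assumed from the usual generated pattern; DiskIOPSReadWrite and DiskMBpsReadWrite are the fields this diff documents as UltraSSD-only.]

package computesamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// resizeUltraDisk raises the provisioned IOPS and throughput of an
// UltraSSD_LRS disk; both knobs are only settable for that SKU. The client is
// assumed to be an already-authorized compute.DisksClient.
func resizeUltraDisk(ctx context.Context, client compute.DisksClient, resourceGroup, diskName string) (compute.Disk, error) {
	update := compute.DiskUpdate{
		DiskUpdateProperties: &compute.DiskUpdateProperties{
			DiskIOPSReadWrite: to.Int64Ptr(4000),
			DiskMBpsReadWrite: to.Int32Ptr(100),
		},
	}
	future, err := client.Update(ctx, resourceGroup, diskName, update)
	if err != nil {
		return compute.Disk{}, err
	}
	// Block until the long-running operation completes, then unwrap the disk,
	// mirroring how DisksUpdateFuture.Result is meant to be consumed.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Disk{}, err
	}
	return future.Result(client)
}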
+type DiskUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdate. +func (du DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if du.DiskUpdateProperties != nil { + objectMap["properties"] = du.DiskUpdateProperties + } + if du.Tags != nil { + objectMap["tags"] = du.Tags + } + if du.Sku != nil { + objectMap["sku"] = du.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. +func (du *DiskUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + du.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + du.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + du.Sku = &sku + } + } + } + + return nil +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` +} + +// Encryption encryption at rest settings for disk or snapshot +type Encryption struct { + // DiskEncryptionSetID - ResourceId of the disk encryption set to use for enabling encryption at rest. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` + // Type - The type of key used to encrypt the data of the disk. Possible values include: 'EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey' + Type EncryptionType `json:"type,omitempty"` +} + +// EncryptionSetIdentity the managed identity for the disk encryption set. It should be given permission on +// the key vault before it can be used to encrypt disks. +type EncryptionSetIdentity struct { + // Type - The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported. 
Possible values include: 'SystemAssigned' + Type DiskEncryptionSetIdentityType `json:"type,omitempty"` + // PrincipalID - READ-ONLY; The object id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a systemAssigned(implicit) identity + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a systemAssigned(implicit) identity + TenantID *string `json:"tenantId,omitempty"` +} + +// EncryptionSetProperties ... +type EncryptionSetProperties struct { + // ActiveKey - The key vault key which is currently used by this disk encryption set. + ActiveKey *KeyVaultAndKeyReference `json:"activeKey,omitempty"` + // PreviousKeys - READ-ONLY; A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is in progress. It will be empty if there is no ongoing key rotation. + PreviousKeys *[]KeyVaultAndKeyReference `json:"previousKeys,omitempty"` + // ProvisioningState - READ-ONLY; The disk encryption set provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// EncryptionSettingsCollection encryption settings for disk or snapshot +type EncryptionSettingsCollection struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // EncryptionSettings - A collection of encryption settings, one for each disk volume. + EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"` + // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. + EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"` +} + +// EncryptionSettingsElement encryption settings for one disk volume. +type EncryptionSettingsElement struct { + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
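[For orientation only, not in the patch: a sketch of creating a managed disk encrypted with a customer-managed key by pointing the new Encryption property at an existing disk encryption set. The DisksClient.CreateOrUpdate signature and the Sku/DiskProperties layout on the Disk model are assumed from the usual generated shape; Encryption, EncryptionAtRestWithCustomerKey and CreationData are taken from this diff. For disks defined inline on a VM, the same set is expected to be referenced through the new DiskEncryptionSetParameters type instead.]

package computesamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// newCMKDisk creates an empty 128 GiB Premium_LRS disk encrypted at rest with
// a customer-managed key held by the given disk encryption set. The client is
// assumed to be an already-authorized compute.DisksClient.
func newCMKDisk(ctx context.Context, client compute.DisksClient, resourceGroup, diskName, diskEncryptionSetID string) (compute.Disk, error) {
	disk := compute.Disk{
		Location: to.StringPtr("westeurope"),
		Sku:      &compute.DiskSku{Name: compute.PremiumLRS},
		DiskProperties: &compute.DiskProperties{
			CreationData: &compute.CreationData{CreateOption: compute.Empty},
			DiskSizeGB:   to.Int32Ptr(128),
			Encryption: &compute.Encryption{
				// EncryptionAtRestWithPlatformKey is the default; switching to
				// the customer-managed type requires a disk encryption set id.
				Type:                compute.EncryptionAtRestWithCustomerKey,
				DiskEncryptionSetID: to.StringPtr(diskEncryptionSetID),
			},
		},
	}
	future, err := client.CreateOrUpdate(ctx, resourceGroup, diskName, disk)
	if err != nil {
		return compute.Disk{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Disk{}, err
	}
	return future.Result(client)
}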
+func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// Gallery specifies information about the Shared Image Gallery that you want to create or update. +type Gallery struct { + autorest.Response `json:"-"` + *GalleryProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Gallery. +func (g Gallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.GalleryProperties != nil { + objectMap["properties"] = g.GalleryProperties + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Gallery struct. 
+func (g *Gallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryProperties GalleryProperties + err = json.Unmarshal(*v, &galleryProperties) + if err != nil { + return err + } + g.GalleryProperties = &galleryProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + g.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + g.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + g.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + g.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + g.Tags = tags + } + } + } + + return nil +} + +// GalleryApplication specifies information about the gallery Application Definition that you want to +// create or update. +type GalleryApplication struct { + autorest.Response `json:"-"` + *GalleryApplicationProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryApplication. +func (ga GalleryApplication) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ga.GalleryApplicationProperties != nil { + objectMap["properties"] = ga.GalleryApplicationProperties + } + if ga.Location != nil { + objectMap["location"] = ga.Location + } + if ga.Tags != nil { + objectMap["tags"] = ga.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryApplication struct. 
+func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryApplicationProperties GalleryApplicationProperties + err = json.Unmarshal(*v, &galleryApplicationProperties) + if err != nil { + return err + } + ga.GalleryApplicationProperties = &galleryApplicationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ga.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ga.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ga.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + ga.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + ga.Tags = tags + } + } + } + + return nil +} + +// GalleryApplicationList the List Gallery Applications operation response. +type GalleryApplicationList struct { + autorest.Response `json:"-"` + // Value - A list of Gallery Applications. + Value *[]GalleryApplication `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch the next page of gallery Application Definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryApplicationListIterator provides access to a complete listing of GalleryApplication values. +type GalleryApplicationListIterator struct { + i int + page GalleryApplicationListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *GalleryApplicationListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryApplicationListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryApplicationListIterator) Response() GalleryApplicationList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter GalleryApplicationListIterator) Value() GalleryApplication { + if !iter.page.NotDone() { + return GalleryApplication{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the GalleryApplicationListIterator type. +func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator { + return GalleryApplicationListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (gal GalleryApplicationList) IsEmpty() bool { + return gal.Value == nil || len(*gal.Value) == 0 +} + +// galleryApplicationListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gal GalleryApplicationList) galleryApplicationListPreparer(ctx context.Context) (*http.Request, error) { + if gal.NextLink == nil || len(to.String(gal.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gal.NextLink))) +} + +// GalleryApplicationListPage contains a page of GalleryApplication values. +type GalleryApplicationListPage struct { + fn func(context.Context, GalleryApplicationList) (GalleryApplicationList, error) + gal GalleryApplicationList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.gal) + if err != nil { + return err + } + page.gal = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *GalleryApplicationListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryApplicationListPage) NotDone() bool { + return !page.gal.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryApplicationListPage) Response() GalleryApplicationList { + return page.gal +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryApplicationListPage) Values() []GalleryApplication { + if page.gal.IsEmpty() { + return nil + } + return *page.gal.Value +} + +// Creates a new instance of the GalleryApplicationListPage type. +func NewGalleryApplicationListPage(getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage { + return GalleryApplicationListPage{fn: getNextPage} +} + +// GalleryApplicationProperties describes the properties of a gallery Application Definition. +type GalleryApplicationProperties struct { + // Description - The description of this gallery Application Definition resource. This property is updatable. + Description *string `json:"description,omitempty"` + // Eula - The Eula agreement for the gallery Application Definition. 
+ Eula *string `json:"eula,omitempty"` + // PrivacyStatementURI - The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + // ReleaseNoteURI - The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // SupportedOSType - This property allows you to specify the supported type of the OS that application is built for.

Possible values are: **Windows**
**Linux**. Possible values include: 'Windows', 'Linux' + SupportedOSType OperatingSystemTypes `json:"supportedOSType,omitempty"` +} + +// GalleryApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryApplicationsCreateOrUpdateFuture) Result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.CreateOrUpdateResponder(ga.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") } } return } -// DiskUpdate disk update resource. -type DiskUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` +// GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryApplicationsDeleteFuture) Result(client GalleryApplicationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryApplicationVersion specifies information about the gallery Application Version that you want to +// create or update. 
+type GalleryApplicationVersion struct { + autorest.Response `json:"-"` + *GalleryApplicationVersionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` - Sku *DiskSku `json:"sku,omitempty"` } -// MarshalJSON is the custom marshaler for DiskUpdate. -func (du DiskUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationVersion. +func (gav GalleryApplicationVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if du.DiskUpdateProperties != nil { - objectMap["properties"] = du.DiskUpdateProperties + if gav.GalleryApplicationVersionProperties != nil { + objectMap["properties"] = gav.GalleryApplicationVersionProperties } - if du.Tags != nil { - objectMap["tags"] = du.Tags + if gav.Location != nil { + objectMap["location"] = gav.Location } - if du.Sku != nil { - objectMap["sku"] = du.Sku + if gav.Tags != nil { + objectMap["tags"] = gav.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. -func (du *DiskUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersion struct. +func (gav *GalleryApplicationVersion) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -3471,30 +4586,57 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) + var galleryApplicationVersionProperties GalleryApplicationVersionProperties + err = json.Unmarshal(*v, &galleryApplicationVersionProperties) if err != nil { return err } - du.DiskUpdateProperties = &diskUpdateProperties + gav.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties } - case "tags": + case "id": if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) + var ID string + err = json.Unmarshal(*v, &ID) if err != nil { return err } - du.Tags = tags + gav.ID = &ID } - case "sku": + case "name": if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) + var name string + err = json.Unmarshal(*v, &name) if err != nil { return err } - du.Sku = &sku + gav.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + gav.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + gav.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gav.Tags = tags } } } @@ -3502,192 +4644,254 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { return nil } -// DiskUpdateProperties disk resource update properties. -type DiskUpdateProperties struct { - // OsType - the Operating System type. 
Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. - EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` - // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` +// GalleryApplicationVersionList the List Gallery Application version operation response. +type GalleryApplicationVersionList struct { + autorest.Response `json:"-"` + // Value - A list of gallery Application Versions. + Value *[]GalleryApplicationVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery Application Versions. + NextLink *string `json:"nextLink,omitempty"` } -// EncryptionSettingsCollection encryption settings for disk or snapshot -type EncryptionSettingsCollection struct { - // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. - Enabled *bool `json:"enabled,omitempty"` - // EncryptionSettings - A collection of encryption settings, one for each disk volume. - EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"` +// GalleryApplicationVersionListIterator provides access to a complete listing of GalleryApplicationVersion +// values. +type GalleryApplicationVersionListIterator struct { + i int + page GalleryApplicationVersionListPage } -// EncryptionSettingsElement encryption settings for one disk volume. -type EncryptionSettingsElement struct { - // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key - DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. - KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *GalleryApplicationVersionListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryApplicationVersionListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryApplicationVersionListIterator) Response() GalleryApplicationVersionList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryApplicationVersionListIterator) Value() GalleryApplicationVersion { + if !iter.page.NotDone() { + return GalleryApplicationVersion{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the GalleryApplicationVersionListIterator type. +func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator { + return GalleryApplicationVersionListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (gavl GalleryApplicationVersionList) IsEmpty() bool { + return gavl.Value == nil || len(*gavl.Value) == 0 +} + +// galleryApplicationVersionListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gavl GalleryApplicationVersionList) galleryApplicationVersionListPreparer(ctx context.Context) (*http.Request, error) { + if gavl.NextLink == nil || len(to.String(gavl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gavl.NextLink))) +} + +// GalleryApplicationVersionListPage contains a page of GalleryApplicationVersion values. +type GalleryApplicationVersionListPage struct { + fn func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error) + gavl GalleryApplicationVersionList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.gavl) + if err != nil { + return err + } + page.gavl = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *GalleryApplicationVersionListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryApplicationVersionListPage) NotDone() bool { + return !page.gavl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryApplicationVersionListPage) Response() GalleryApplicationVersionList { + return page.gavl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryApplicationVersionListPage) Values() []GalleryApplicationVersion { + if page.gavl.IsEmpty() { + return nil + } + return *page.gavl.Value +} + +// Creates a new instance of the GalleryApplicationVersionListPage type. +func NewGalleryApplicationVersionListPage(getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage { + return GalleryApplicationVersionListPage{fn: getNextPage} +} + +// GalleryApplicationVersionProperties describes the properties of a gallery Image Version. +type GalleryApplicationVersionProperties struct { + PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' + ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` + // ReplicationStatus - READ-ONLY + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` +} + +// GalleryApplicationVersionPublishingProfile the publishing profile of a gallery Image Version. +type GalleryApplicationVersionPublishingProfile struct { + Source *UserArtifactSource `json:"source,omitempty"` + // ContentType - Optional. May be used to help process this file. The type of file contained in the source, e.g. zip, json, etc. + ContentType *string `json:"contentType,omitempty"` + // EnableHealthCheck - Optional. Whether or not this application reports health. + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"` + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. 
+ ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` } -// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleriesCreateOrUpdateFuture struct { +// GalleryApplicationVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type GalleryApplicationVersionsCreateOrUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) { +func (future *GalleryApplicationVersionsCreateOrUpdateFuture) Result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.CreateOrUpdateResponder(g.Response.Response) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.CreateOrUpdateResponder(gav.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") } } return } -// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleriesDeleteFuture struct { +// GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type GalleryApplicationVersionsDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) { +func (future *GalleryApplicationVersionsDeleteFuture) Result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture") return } ar.Response = future.Response() return } -// Gallery specifies information about the Shared Image Gallery that you want to create or update. -type Gallery struct { - autorest.Response `json:"-"` - *GalleryProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Gallery. -func (g Gallery) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if g.GalleryProperties != nil { - objectMap["properties"] = g.GalleryProperties - } - if g.Location != nil { - objectMap["location"] = g.Location - } - if g.Tags != nil { - objectMap["tags"] = g.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Gallery struct. -func (g *Gallery) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var galleryProperties GalleryProperties - err = json.Unmarshal(*v, &galleryProperties) - if err != nil { - return err - } - g.GalleryProperties = &galleryProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - g.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - g.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - g.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - g.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - g.Tags = tags - } - } - } - - return nil -} - // GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. type GalleryArtifactPublishingProfileBase struct { // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. 
TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` +} + +// GalleryArtifactSource the source image from which the Image Version is going to be created. +type GalleryArtifactSource struct { + ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` } // GalleryArtifactVersionSource the gallery artifact version source. @@ -4000,8 +5204,8 @@ type GalleryImageProperties struct { Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` Disallowed *Disallowed `json:"disallowed,omitempty"` PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' - ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' + ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` } // GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a @@ -4306,8 +5510,8 @@ func NewGalleryImageVersionListPage(getNextPage func(context.Context, GalleryIma // GalleryImageVersionProperties describes the properties of a gallery Image Version. type GalleryImageVersionProperties struct { PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' - ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. 
Possible values include: 'ProvisioningState3Creating', 'ProvisioningState3Updating', 'ProvisioningState3Failed', 'ProvisioningState3Succeeded', 'ProvisioningState3Deleting', 'ProvisioningState3Migrating' + ProvisioningState ProvisioningState3 `json:"provisioningState,omitempty"` StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` // ReplicationStatus - READ-ONLY ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` @@ -4315,6 +5519,8 @@ type GalleryImageVersionProperties struct { // GalleryImageVersionPublishingProfile the publishing profile of a gallery Image Version. type GalleryImageVersionPublishingProfile struct { + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. ReplicaCount *int32 `json:"replicaCount,omitempty"` // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. @@ -4325,8 +5531,6 @@ type GalleryImageVersionPublishingProfile struct { EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS' StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` - // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. - TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` } // GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a @@ -4685,6 +5889,26 @@ type ImageDataDisk struct { DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// ImageDisk describes a image disk. +type ImageDisk struct { + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are: **None**, **ReadOnly**, **ReadWrite**
Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` } // ImageDiskReference the source image used for creating the disk. @@ -4857,8 +6081,10 @@ type ImageOSDisk struct { Caching CachingTypes `json:"caching,omitempty"` // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. UltraSSD_LRS cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` } // ImageProperties describes the properties of an Image. @@ -5374,10 +6600,18 @@ type MaintenanceRedeployStatus struct { LastOperationMessage *string `json:"lastOperationMessage,omitempty"` } +// ManagedArtifact the managed artifact. +type ManagedArtifact struct { + // ID - The managed artifact id. + ID *string `json:"id,omitempty"` +} + // ManagedDiskParameters the parameters of a managed disk. type ManagedDiskParameters struct { // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` } @@ -5577,6 +6811,8 @@ type OSProfile struct { Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

This may only be set to False when no extensions are present on the virtual machine. AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` + // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required from the virtual machine. + RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"` } // Plan specifies information about the marketplace image used to create the virtual machine. This element @@ -6587,6 +7823,18 @@ type RunCommandResult struct { Value *[]InstanceViewStatus `json:"value,omitempty"` } +// ScaleInPolicy describes a scale-in policy for a virtual machine scale set. +type ScaleInPolicy struct { + // Rules - The rules to be followed when scaling-in a virtual machine scale set.

Possible values are:
**Default** When a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in.
**OldestVM** When a virtual machine scale set is being scaled-in, the oldest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will be chosen for removal.
**NewestVM** When a virtual machine scale set is being scaled-in, the newest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will be chosen for removal.
+ Rules *[]VirtualMachineScaleSetScaleInRules `json:"rules,omitempty"` +} + +// ScheduledEventsProfile ... +type ScheduledEventsProfile struct { + // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations. + TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"` +} + // Sku describes a virtual machine scale set sku. type Sku struct { // Name - The sku name. @@ -6877,12 +8125,20 @@ type SnapshotProperties struct { HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. CreationData *CreationData `json:"creationData,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only. + DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"` + // UniqueID - READ-ONLY; Unique Guid identifying the resource. + UniqueID *string `json:"uniqueId,omitempty"` // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` // ProvisioningState - READ-ONLY; The disk provisioning state. ProvisioningState *string `json:"provisioningState,omitempty"` + // Incremental - Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed. + Incremental *bool `json:"incremental,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` } // SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running @@ -7095,7 +8351,7 @@ func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { type SnapshotUpdateProperties struct { // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' OsType OperatingSystemTypes `json:"osType,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. 
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` @@ -7155,6 +8411,14 @@ type TargetRegion struct { StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` } +// TerminateNotificationProfile ... +type TerminateNotificationProfile struct { + // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes (PT5M) + NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"` + // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled. + Enable *bool `json:"enable,omitempty"` +} + // ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. type ThrottledRequestsInput struct { // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. @@ -7253,6 +8517,14 @@ type UsageName struct { LocalizedValue *string `json:"localizedValue,omitempty"` } +// UserArtifactSource the source image from which the Image Version is going to be created. +type UserArtifactSource struct { + // FileName - Required. The fileName of the artifact. + FileName *string `json:"fileName,omitempty"` + // MediaLink - Required. The mediaLink of the artifact, must be a readable storage blob. + MediaLink *string `json:"mediaLink,omitempty"` +} + // VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate // should reside on the VM. type VaultCertificate struct { @@ -8021,6 +9293,8 @@ type VirtualMachineImageProperties struct { OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"` + // HyperVGeneration - Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2' + HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"` } // VirtualMachineImageResource virtual machine image resource information. @@ -8244,10 +9518,18 @@ type VirtualMachineProperties struct { NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.

This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set.

This property cannot exist along with a non-null properties.availabilitySet reference.

Minimum api-version: 2019-03-01. + VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"` // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to.

Minimum api-version: 2018-04-01. ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` + // Priority - Specifies the priority for the virtual machine.

Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low' + Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` + // EvictionPolicy - Specifies the eviction policy for the low priority virtual machine. Only supported value is 'Deallocate'.

Minimum api-version: 2019-03-01. Possible values include: 'Deallocate', 'Delete' + EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` + // BillingProfile - Specifies the billing related details of a low priority virtual machine.

Minimum api-version: 2019-03-01. + BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // Host - Specifies information about the dedicated host that the virtual machine resides in.

Minimum api-version: 2018-10-01. Host *SubResource `json:"host,omitempty"` // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. @@ -8439,6 +9721,10 @@ type VirtualMachineScaleSetDataDisk struct { DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // ManagedDisk - The managed disk parameters. ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` + // DiskIOPSReadWrite - Specifies the Read-Write IOPS for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - Specifies the bandwidth in MB per second for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` } // VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension. @@ -9473,6 +10759,8 @@ func NewVirtualMachineScaleSetListWithLinkResultPage(getNextPage func(context.Co type VirtualMachineScaleSetManagedDiskParameters struct { // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` } // VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's @@ -9620,6 +10908,8 @@ type VirtualMachineScaleSetOSProfile struct { type VirtualMachineScaleSetProperties struct { // UpgradePolicy - The upgrade policy. UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // AutomaticRepairsPolicy - Policy for automatic repairs. + AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` // VirtualMachineProfile - The virtual machine profile. VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. @@ -9640,6 +10930,8 @@ type VirtualMachineScaleSetProperties struct { ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // ScaleInPolicy - Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in. 
+ ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` } // VirtualMachineScaleSetPublicIPAddressConfiguration describes a virtual machines scale set IP @@ -9713,6 +11005,8 @@ type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"` // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses. PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` + // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' + PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"` } // VirtualMachineScaleSetReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters. @@ -10404,6 +11698,8 @@ type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { // VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile. type VirtualMachineScaleSetUpdateNetworkProfile struct { + // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'. + HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` // NetworkInterfaceConfigurations - The list of network configurations. NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` } @@ -10441,14 +11737,20 @@ type VirtualMachineScaleSetUpdateOSProfile struct { type VirtualMachineScaleSetUpdateProperties struct { // UpgradePolicy - The upgrade policy. UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // AutomaticRepairsPolicy - Policy for automatic repairs. + AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` // VirtualMachineProfile - The virtual machine profile. VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. Overprovision *bool `json:"overprovision,omitempty"` + // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs. + DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // ScaleInPolicy - Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in. 
+ ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` } // VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP @@ -10537,6 +11839,10 @@ type VirtualMachineScaleSetUpdateVMProfile struct { ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` // LicenseType - The license type, which is for bring your own license scenario. LicenseType *string `json:"licenseType,omitempty"` + // BillingProfile - Specifies the billing related details of a low priority VMSS.
Minimum api-version: 2019-03-01. + BillingProfile *BillingProfile `json:"billingProfile,omitempty"` + // ScheduledEventsProfile - Specifies Scheduled Event related configurations. + ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` } // VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine. @@ -10697,6 +12003,58 @@ func (vmssv *VirtualMachineScaleSetVM) UnmarshalJSON(body []byte) error { return nil } +// VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetVMExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.CreateOrUpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetVMExtensionsDeleteFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type VirtualMachineScaleSetVMExtensionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMExtensionsDeleteFuture) Result(client VirtualMachineScaleSetVMExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMExtensionsSummary extensions summary for virtual machines of a virtual machine // scale set. type VirtualMachineScaleSetVMExtensionsSummary struct { @@ -10706,6 +12064,35 @@ type VirtualMachineScaleSetVMExtensionsSummary struct { StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` } +// VirtualMachineScaleSetVMExtensionsUpdateFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type VirtualMachineScaleSetVMExtensionsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. 
+// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMExtensionsUpdateFuture) Result(client VirtualMachineScaleSetVMExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.UpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale // set. type VirtualMachineScaleSetVMInstanceIDs struct { @@ -10919,6 +12306,10 @@ type VirtualMachineScaleSetVMProfile struct { Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` // EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set.
Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete' EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` + // BillingProfile - Specifies the billing related details of a low priority VMSS.
Minimum api-version: 2019-03-01. + BillingProfile *BillingProfile `json:"billingProfile,omitempty"` + // ScheduledEventsProfile - Specifies Scheduled Event related configurations. + ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` } // VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual @@ -11431,6 +12822,29 @@ func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient return } +// VirtualMachinesReapplyFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesReapplyFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesReapplyFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReapplyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReapplyFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRedeployFuture struct { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go index c19b7567d7d..021b16328e7 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go @@ -75,7 +75,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go index a85c1640cd4..a42d5268556 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go @@ -85,7 +85,7 @@ func (client ProximityPlacementGroupsClient) CreateOrUpdatePreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -164,7 +164,7 @@ func (client ProximityPlacementGroupsClient) DeletePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -240,7 +240,7 @@ func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +316,7 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroupPreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -426,7 +426,7 @@ func (client ProximityPlacementGroupsClient) ListBySubscriptionPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -541,7 +541,7 @@ func (client ProximityPlacementGroupsClient) UpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go index dade057a45f..adb402788c8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go @@ -41,7 +41,9 @@ func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) Res } // List gets the list of Microsoft.Compute SKUs available for your Subscription. -func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) { +// Parameters: +// filter - the filter to apply on the operation. +func (client ResourceSkusClient) List(ctx context.Context, filter string) (result ResourceSkusResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") defer func() { @@ -53,7 +55,7 @@ func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusR }() } result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) + req, err := client.ListPreparer(ctx, filter) if err != nil { err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") return @@ -75,7 +77,7 @@ func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusR } // ListPreparer prepares the List request. 
-func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { +func (client ResourceSkusClient) ListPreparer(ctx context.Context, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -84,6 +86,9 @@ func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Reques queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -135,7 +140,7 @@ func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResult } // ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) { +func (client ResourceSkusClient) ListComplete(ctx context.Context, filter string) (result ResourceSkusResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") defer func() { @@ -146,6 +151,6 @@ func (client ResourceSkusClient) ListComplete(ctx context.Context) (result Resou tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.List(ctx) + result.page, err = client.List(ctx, filter) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go index dc153b65164..3a52fe87388 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go @@ -94,7 +94,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -175,7 +175,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -258,7 +258,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -337,7 +337,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -418,7 +418,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -531,7 +531,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx 
context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -640,7 +640,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -718,7 +718,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-09-30" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go index 36565c1a55c..690136c06d5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go @@ -91,7 +91,7 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go index 94adf706bf3..7a670a607c8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go @@ -86,7 +86,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex "version": autorest.Encode("path", version), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -162,7 +162,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -240,7 +240,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte "type": autorest.Encode("path", typeParameter), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go index bb80694c634..a77180ddc68 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go @@ -81,7 +81,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -162,7 +162,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -247,7 +247,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -328,7 +328,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -405,7 +405,7 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go index 2843539d6a8..e1e830c2f43 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go @@ -90,7 +90,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati "version": autorest.Encode("path", version), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -172,7 +172,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = 
"2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -258,7 +258,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -333,7 +333,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -412,7 +412,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go index e2b53b70479..82be43acfd4 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go @@ -91,7 +91,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -173,7 +173,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go index dedeebb8deb..b329db181d9 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go @@ -89,7 +89,7 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -169,7 +169,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -267,7 +267,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const 
APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -348,7 +348,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -424,7 +424,7 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -506,7 +506,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -583,7 +583,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -663,7 +663,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -740,7 +740,7 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -812,7 +812,9 @@ func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGr // ListAll lists all of the virtual machines in the specified subscription. Use the nextLink property in the response // to get the next page of virtual machines. -func (client VirtualMachinesClient) ListAll(ctx context.Context) (result VirtualMachineListResultPage, err error) { +// Parameters: +// statusOnly - statusOnly=true enables fetching run time status of all Virtual Machines in the subscription. +func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly string) (result VirtualMachineListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll") defer func() { @@ -824,7 +826,7 @@ func (client VirtualMachinesClient) ListAll(ctx context.Context) (result Virtual }() } result.fn = client.listAllNextResults - req, err := client.ListAllPreparer(ctx) + req, err := client.ListAllPreparer(ctx, statusOnly) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") return @@ -846,15 +848,18 @@ func (client VirtualMachinesClient) ListAll(ctx context.Context) (result Virtual } // ListAllPreparer prepares the ListAll request. 
-func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { +func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusOnly string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(statusOnly) > 0 { + queryParameters["statusOnly"] = autorest.Encode("query", statusOnly) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -906,7 +911,7 @@ func (client VirtualMachinesClient) listAllNextResults(ctx context.Context, last } // ListAllComplete enumerates all values, automatically crossing page boundaries as required. -func (client VirtualMachinesClient) ListAllComplete(ctx context.Context) (result VirtualMachineListResultIterator, err error) { +func (client VirtualMachinesClient) ListAllComplete(ctx context.Context, statusOnly string) (result VirtualMachineListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll") defer func() { @@ -917,7 +922,7 @@ func (client VirtualMachinesClient) ListAllComplete(ctx context.Context) (result tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListAll(ctx) + result.page, err = client.ListAll(ctx, statusOnly) return } @@ -965,7 +970,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1047,7 +1052,7 @@ func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1155,7 +1160,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1235,7 +1240,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1278,7 +1283,83 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu return } -// Redeploy the operation to redeploy a virtual machine. +// Reapply the operation to reapply a virtual machine's state. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMName - the name of the virtual machine. 
+func (client VirtualMachinesClient) Reapply(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesReapplyFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Reapply") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ReapplyPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reapply", nil, "Failure preparing request") + return + } + + result, err = client.ReapplySender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reapply", result.Response(), "Failure sending request") + return + } + + return +} + +// ReapplyPreparer prepares the Reapply request. +func (client VirtualMachinesClient) ReapplyPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReapplySender sends the Reapply request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ReapplySender(req *http.Request) (future VirtualMachinesReapplyFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ReapplyResponder handles the response to the Reapply request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ReapplyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Redeploy shuts down the virtual machine, moves it to a new node, and powers it back on. // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. 
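The hunk above changes VirtualMachinesClient.ListAll to accept a statusOnly argument and adds a Reapply long-running operation. A minimal caller-side sketch of how consumers of this vendored SDK would adapt, assuming a configured environment authorizer and placeholder subscription, resource group, and VM names (illustrative only, not part of this patch):

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical subscription ID and resource names, for illustration only.
	client := compute.NewVirtualMachinesClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// ListAll now takes a statusOnly argument; pass "" to keep the previous behaviour,
	// or "true" to fetch only the runtime status of every VM in the subscription.
	page, err := client.ListAll(ctx, "")
	if err != nil {
		panic(err)
	}
	for _, vm := range page.Values() {
		fmt.Println(*vm.Name)
	}

	// Reapply is a long-running operation; wait for the future before using the result.
	future, err := client.Reapply(ctx, "my-resource-group", "my-vm")
	if err != nil {
		panic(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
}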
@@ -1316,7 +1397,7 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1393,7 +1474,7 @@ func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1474,7 +1555,7 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1557,7 +1638,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1636,7 +1717,7 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1713,7 +1794,7 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go index 041df698055..7e4d6d0a6b7 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go @@ -82,7 +82,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -163,7 +163,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. 
"vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -248,7 +248,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -329,7 +329,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go index 6bdc9f2b322..c20c01d515c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go @@ -80,7 +80,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -162,7 +162,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -235,7 +235,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeP "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -312,7 +312,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go index 97916d2125d..54b1393fe57 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go @@ -177,7 +177,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -258,7 +258,7 @@ 
func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -339,7 +339,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -422,7 +422,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -508,7 +508,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain), @@ -586,7 +586,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -663,7 +663,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -741,7 +741,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -854,7 +854,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -966,7 +966,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1082,7 +1082,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1193,7 +1193,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1279,7 +1279,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - 
const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1327,7 +1327,8 @@ func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Respons return } -// Redeploy redeploy one or more virtual machines in a VM scale set. +// Redeploy shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers +// them back on. // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. @@ -1366,7 +1367,7 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1449,7 +1450,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1532,7 +1533,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1614,7 +1615,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1696,7 +1697,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1778,7 +1779,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1864,7 +1865,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go new file mode 100644 index 00000000000..3fe6bf51dd2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go @@ -0,0 +1,459 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// VirtualMachineScaleSetVMExtensionsClient is the compute Client +type VirtualMachineScaleSetVMExtensionsClient struct { + BaseClient +} + +// NewVirtualMachineScaleSetVMExtensionsClient creates an instance of the VirtualMachineScaleSetVMExtensionsClient +// client. +func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string) VirtualMachineScaleSetVMExtensionsClient { + return NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetVMExtensionsClient client. +func NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMExtensionsClient { + return VirtualMachineScaleSetVMExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update the VMSS VM extension. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. +// instanceID - the instance ID of the virtual machine. +// VMExtensionName - the name of the virtual machine extension. +// extensionParameters - parameters supplied to the Create Virtual Machine Extension operation. +func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, extensionParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the VMSS VM extension. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. +// instanceID - the instance ID of the virtual machine. +// VMExtensionName - the name of the virtual machine extension. 
+func (client VirtualMachineScaleSetVMExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string) (result VirtualMachineScaleSetVMExtensionsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetVMExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the VMSS VM extension. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. +// instanceID - the instance ID of the virtual machine. +// VMExtensionName - the name of the virtual machine extension. 
+// expand - the expand expression to apply on the operation. +func (client VirtualMachineScaleSetVMExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetVMExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetVMExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to get all extensions of an instance in Virtual Machine Scaleset. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. +// instanceID - the instance ID of the virtual machine. +// expand - the expand expression to apply on the operation. +func (client VirtualMachineScaleSetVMExtensionsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (result VirtualMachineExtensionsListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetVMExtensionsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) 
+} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineExtensionsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update the operation to update the VMSS VM extension. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. +// instanceID - the instance ID of the virtual machine. +// VMExtensionName - the name of the virtual machine extension. +// extensionParameters - parameters supplied to the Update Virtual Machine Extension operation. +func (client VirtualMachineScaleSetVMExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (result VirtualMachineScaleSetVMExtensionsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, extensionParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client VirtualMachineScaleSetVMExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineScaleSetVMExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go index bf227197eb4..155e6eee83d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go @@ -83,7 +83,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -161,7 +161,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -204,7 +204,8 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. // instanceID - the instance ID of the virtual machine. -func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { +// expand - the expand expression to apply on the operation. 
+func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (result VirtualMachineScaleSetVM, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Get") defer func() { @@ -215,7 +216,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG tracing.EndSpan(ctx, sc, err) }() } - req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") return @@ -237,7 +238,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG } // GetPreparer prepares the Get request. -func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -245,10 +246,13 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -324,7 +328,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -405,7 +409,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -524,7 +528,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -606,7 +610,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -649,7 +653,8 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Respo return } -// Redeploy redeploys a virtual machine in a VM scale set. +// Redeploy shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it back +// on. // Parameters: // resourceGroupName - the name of the resource group. 
// VMScaleSetName - the name of the VM scale set. @@ -689,7 +694,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -768,7 +773,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -852,7 +857,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -930,7 +935,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1015,7 +1020,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1096,7 +1101,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1196,7 +1201,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go index 04bcbb0490d..55335fe9a51 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go @@ -90,7 +90,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-03-01" + const APIVersion = "2019-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go index 8ab53a36914..cf551f2a5e1 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go +++ 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go @@ -2024,7 +2024,7 @@ type OperationPropertiesDefinition struct { ServiceSpecification *OperationServiceSpecificationDefinition `json:"serviceSpecification,omitempty"` } -// OperationServiceSpecificationDefinition the definition of Azure Monitoring metrics list. +// OperationServiceSpecificationDefinition the definition of Azure Monitoring list. type OperationServiceSpecificationDefinition struct { // MetricSpecifications - A list of Azure Monitoring metrics definition. MetricSpecifications *[]OperationMetricSpecificationDefinition `json:"metricSpecifications,omitempty"` @@ -3022,7 +3022,7 @@ func (r Resource) MarshalJSON() ([]byte, error) { // RetentionPolicy the retention policy for a container registry. type RetentionPolicy struct { - // Days - The number of days to retain manifest before it expires. + // Days - The number of days to retain an untagged manifest after which it gets purged. Days *int32 `json:"days,omitempty"` // LastUpdatedTime - READ-ONLY; The timestamp when the policy was last updated. LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bastionhosts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bastionhosts.go index c15c2e1a852..7d2f594dfc0 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bastionhosts.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bastionhosts.go @@ -496,3 +496,83 @@ func (client BastionHostsClient) ListByResourceGroupComplete(ctx context.Context result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) return } + +// UpdateTags updates bastion host tags. +// Parameters: +// resourceGroupName - the resource group name of the BastionHost. +// bastionHostName - the name of the bastionHost. +// bastionHostParameters - parameters supplied to update a bastion host tags. +func (client BastionHostsClient) UpdateTags(ctx context.Context, resourceGroupName string, bastionHostName string, bastionHostParameters TagsObject) (result BastionHostsUpdateTagsFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.UpdateTags") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, bastionHostName, bastionHostParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = client.UpdateTagsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "UpdateTags", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. 
+func (client BastionHostsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, bastionHostName string, bastionHostParameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "bastionHostName": autorest.Encode("path", bastionHostName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}", pathParameters), + autorest.WithJSON(bastionHostParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client BastionHostsClient) UpdateTagsSender(req *http.Request) (future BastionHostsUpdateTagsFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. +func (client BastionHostsClient) UpdateTagsResponder(resp *http.Response) (result BastionHost, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/connectionmonitors.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/connectionmonitors.go index 843e1f5594c..ca171615eb0 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/connectionmonitors.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/connectionmonitors.go @@ -601,3 +601,85 @@ func (client ConnectionMonitorsClient) StopResponder(resp *http.Response) (resul result.Response = resp return } + +// UpdateTags update tags of the specified connection monitor. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkWatcherName - the name of the network watcher. +// connectionMonitorName - the name of the connection monitor. +// parameters - parameters supplied to update connection monitor tags. 
+func (client ConnectionMonitorsClient) UpdateTags(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters TagsObject) (result ConnectionMonitorResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ConnectionMonitorsClient.UpdateTags") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "UpdateTags", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateTagsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "UpdateTags", resp, "Failure sending request") + return + } + + result, err = client.UpdateTagsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsClient", "UpdateTags", resp, "Failure responding to request") + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. +func (client ConnectionMonitorsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "connectionMonitorName": autorest.Encode("path", connectionMonitorName), + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client ConnectionMonitorsClient) UpdateTagsSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. 
+func (client ConnectionMonitorsClient) UpdateTagsResponder(resp *http.Response) (result ConnectionMonitorResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicies.go new file mode 100644 index 00000000000..a15914a1543 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicies.go @@ -0,0 +1,582 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FirewallPoliciesClient is the network Client +type FirewallPoliciesClient struct { + BaseClient +} + +// NewFirewallPoliciesClient creates an instance of the FirewallPoliciesClient client. +func NewFirewallPoliciesClient(subscriptionID string) FirewallPoliciesClient { + return NewFirewallPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFirewallPoliciesClientWithBaseURI creates an instance of the FirewallPoliciesClient client. +func NewFirewallPoliciesClientWithBaseURI(baseURI string, subscriptionID string) FirewallPoliciesClient { + return FirewallPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates the specified Firewall Policy. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +// parameters - parameters supplied to the create or update Firewall Policy operation. 
+func (client FirewallPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters FirewallPolicy) (result FirewallPoliciesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, firewallPolicyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client FirewallPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters FirewallPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Etag = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPoliciesClient) CreateOrUpdateSender(req *http.Request) (future FirewallPoliciesCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client FirewallPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified Firewall Policy. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. 
+func (client FirewallPoliciesClient) Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPoliciesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, firewallPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client FirewallPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPoliciesClient) DeleteSender(req *http.Request) (future FirewallPoliciesDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client FirewallPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified Firewall Policy. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +// expand - expands referenced resources. 
+func (client FirewallPoliciesClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (result FirewallPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, firewallPolicyName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client FirewallPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client FirewallPoliciesClient) GetResponder(resp *http.Response) (result FirewallPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all Firewall Policies in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client FirewallPoliciesClient) List(ctx context.Context, resourceGroupName string) (result FirewallPolicyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.List") + defer func() { + sc := -1 + if result.fplr.Response.Response != nil { + sc = result.fplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.fplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", resp, "Failure sending request") + return + } + + result.fplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client FirewallPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client FirewallPoliciesClient) ListResponder(resp *http.Response) (result FirewallPolicyListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client FirewallPoliciesClient) listNextResults(ctx context.Context, lastResults FirewallPolicyListResult) (result FirewallPolicyListResult, err error) { + req, err := lastResults.firewallPolicyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client FirewallPoliciesClient) ListComplete(ctx context.Context, resourceGroupName string) (result FirewallPolicyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all the Firewall Policies in a subscription. +func (client FirewallPoliciesClient) ListAll(ctx context.Context) (result FirewallPolicyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.ListAll") + defer func() { + sc := -1 + if result.fplr.Response.Response != nil { + sc = result.fplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.fplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", resp, "Failure sending request") + return + } + + result.fplr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client FirewallPoliciesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/firewallPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client FirewallPoliciesClient) ListAllSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client FirewallPoliciesClient) ListAllResponder(resp *http.Response) (result FirewallPolicyListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client FirewallPoliciesClient) listAllNextResults(ctx context.Context, lastResults FirewallPolicyListResult) (result FirewallPolicyListResult, err error) { + req, err := lastResults.firewallPolicyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client FirewallPoliciesClient) ListAllComplete(ctx context.Context) (result FirewallPolicyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// UpdateTags updates a Firewall Policy Tags. +// Parameters: +// resourceGroupName - the resource group name of the Firewall Policy. +// firewallPolicyName - the name of the Firewall Policy being updated. +// firewallPolicyParameters - parameters supplied to Update Firewall Policy tags. 
+func (client FirewallPoliciesClient) UpdateTags(ctx context.Context, resourceGroupName string, firewallPolicyName string, firewallPolicyParameters TagsObject) (result FirewallPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.UpdateTags") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, firewallPolicyName, firewallPolicyParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateTagsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", resp, "Failure sending request") + return + } + + result, err = client.UpdateTagsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", resp, "Failure responding to request") + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. +func (client FirewallPoliciesClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, firewallPolicyParameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters), + autorest.WithJSON(firewallPolicyParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPoliciesClient) UpdateTagsSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. 
+func (client FirewallPoliciesClient) UpdateTagsResponder(resp *http.Response) (result FirewallPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go new file mode 100644 index 00000000000..989c3d1635d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go @@ -0,0 +1,409 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FirewallPolicyRuleGroupsClient is the network Client +type FirewallPolicyRuleGroupsClient struct { + BaseClient +} + +// NewFirewallPolicyRuleGroupsClient creates an instance of the FirewallPolicyRuleGroupsClient client. +func NewFirewallPolicyRuleGroupsClient(subscriptionID string) FirewallPolicyRuleGroupsClient { + return NewFirewallPolicyRuleGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFirewallPolicyRuleGroupsClientWithBaseURI creates an instance of the FirewallPolicyRuleGroupsClient client. +func NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI string, subscriptionID string) FirewallPolicyRuleGroupsClient { + return FirewallPolicyRuleGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates the specified FirewallPolicyRuleGroup. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +// ruleGroupName - the name of the FirewallPolicyRuleGroup. +// parameters - parameters supplied to the create or update FirewallPolicyRuleGroup operation. 
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string, parameters FirewallPolicyRuleGroup) (result FirewallPolicyRuleGroupsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.InclusiveMaximum, Rule: int64(65000), Chain: nil}, + {Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.InclusiveMinimum, Rule: 100, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client FirewallPolicyRuleGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string, parameters FirewallPolicyRuleGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleGroupName": autorest.Encode("path", ruleGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Etag = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateSender(req *http.Request) (future FirewallPolicyRuleGroupsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified FirewallPolicyRuleGroup. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +// ruleGroupName - the name of the FirewallPolicyRuleGroup. +func (client FirewallPolicyRuleGroupsClient) Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result FirewallPolicyRuleGroupsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client FirewallPolicyRuleGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleGroupName": autorest.Encode("path", ruleGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client FirewallPolicyRuleGroupsClient) DeleteSender(req *http.Request) (future FirewallPolicyRuleGroupsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client FirewallPolicyRuleGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified FirewallPolicyRuleGroup. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +// ruleGroupName - the name of the FirewallPolicyRuleGroup. +func (client FirewallPolicyRuleGroupsClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result FirewallPolicyRuleGroup, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client FirewallPolicyRuleGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleGroupName": autorest.Encode("path", ruleGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client FirewallPolicyRuleGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client FirewallPolicyRuleGroupsClient) GetResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all FirewallPolicyRuleGroups in a FirewallPolicy resource. +// Parameters: +// resourceGroupName - the name of the resource group. +// firewallPolicyName - the name of the Firewall Policy. +func (client FirewallPolicyRuleGroupsClient) List(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPolicyRuleGroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.List") + defer func() { + sc := -1 + if result.fprglr.Response.Response != nil { + sc = result.fprglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, firewallPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.fprglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", resp, "Failure sending request") + return + } + + result.fprglr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client FirewallPolicyRuleGroupsClient) ListPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "firewallPolicyName": autorest.Encode("path", firewallPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client FirewallPolicyRuleGroupsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. 
The method always +// closes the http.Response Body. +func (client FirewallPolicyRuleGroupsClient) ListResponder(resp *http.Response) (result FirewallPolicyRuleGroupListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client FirewallPolicyRuleGroupsClient) listNextResults(ctx context.Context, lastResults FirewallPolicyRuleGroupListResult) (result FirewallPolicyRuleGroupListResult, err error) { + req, err := lastResults.firewallPolicyRuleGroupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client FirewallPolicyRuleGroupsClient) ListComplete(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPolicyRuleGroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, firewallPolicyName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/models.go index 3f4ca3c8680..49f667bf9a3 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/models.go @@ -869,6 +869,74 @@ func PossibleExpressRoutePortsEncapsulationValues() []ExpressRoutePortsEncapsula return []ExpressRoutePortsEncapsulation{Dot1Q, QinQ} } +// FirewallPolicyFilterRuleActionType enumerates the values for firewall policy filter rule action type. +type FirewallPolicyFilterRuleActionType string + +const ( + // FirewallPolicyFilterRuleActionTypeAlert ... + FirewallPolicyFilterRuleActionTypeAlert FirewallPolicyFilterRuleActionType = "Alert " + // FirewallPolicyFilterRuleActionTypeAllow ... + FirewallPolicyFilterRuleActionTypeAllow FirewallPolicyFilterRuleActionType = "Allow" + // FirewallPolicyFilterRuleActionTypeDeny ... + FirewallPolicyFilterRuleActionTypeDeny FirewallPolicyFilterRuleActionType = "Deny" +) + +// PossibleFirewallPolicyFilterRuleActionTypeValues returns an array of possible values for the FirewallPolicyFilterRuleActionType const type. 
+func PossibleFirewallPolicyFilterRuleActionTypeValues() []FirewallPolicyFilterRuleActionType { + return []FirewallPolicyFilterRuleActionType{FirewallPolicyFilterRuleActionTypeAlert, FirewallPolicyFilterRuleActionTypeAllow, FirewallPolicyFilterRuleActionTypeDeny} +} + +// FirewallPolicyNatRuleActionType enumerates the values for firewall policy nat rule action type. +type FirewallPolicyNatRuleActionType string + +const ( + // DNAT ... + DNAT FirewallPolicyNatRuleActionType = "DNAT" + // SNAT ... + SNAT FirewallPolicyNatRuleActionType = "SNAT" +) + +// PossibleFirewallPolicyNatRuleActionTypeValues returns an array of possible values for the FirewallPolicyNatRuleActionType const type. +func PossibleFirewallPolicyNatRuleActionTypeValues() []FirewallPolicyNatRuleActionType { + return []FirewallPolicyNatRuleActionType{DNAT, SNAT} +} + +// FirewallPolicyRuleConditionApplicationProtocolType enumerates the values for firewall policy rule condition +// application protocol type. +type FirewallPolicyRuleConditionApplicationProtocolType string + +const ( + // FirewallPolicyRuleConditionApplicationProtocolTypeHTTP ... + FirewallPolicyRuleConditionApplicationProtocolTypeHTTP FirewallPolicyRuleConditionApplicationProtocolType = "Http" + // FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS ... + FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS FirewallPolicyRuleConditionApplicationProtocolType = "Https" +) + +// PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues returns an array of possible values for the FirewallPolicyRuleConditionApplicationProtocolType const type. +func PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues() []FirewallPolicyRuleConditionApplicationProtocolType { + return []FirewallPolicyRuleConditionApplicationProtocolType{FirewallPolicyRuleConditionApplicationProtocolTypeHTTP, FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS} +} + +// FirewallPolicyRuleConditionNetworkProtocol enumerates the values for firewall policy rule condition network +// protocol. +type FirewallPolicyRuleConditionNetworkProtocol string + +const ( + // FirewallPolicyRuleConditionNetworkProtocolAny ... + FirewallPolicyRuleConditionNetworkProtocolAny FirewallPolicyRuleConditionNetworkProtocol = "Any" + // FirewallPolicyRuleConditionNetworkProtocolICMP ... + FirewallPolicyRuleConditionNetworkProtocolICMP FirewallPolicyRuleConditionNetworkProtocol = "ICMP" + // FirewallPolicyRuleConditionNetworkProtocolTCP ... + FirewallPolicyRuleConditionNetworkProtocolTCP FirewallPolicyRuleConditionNetworkProtocol = "TCP" + // FirewallPolicyRuleConditionNetworkProtocolUDP ... + FirewallPolicyRuleConditionNetworkProtocolUDP FirewallPolicyRuleConditionNetworkProtocol = "UDP" +) + +// PossibleFirewallPolicyRuleConditionNetworkProtocolValues returns an array of possible values for the FirewallPolicyRuleConditionNetworkProtocol const type. +func PossibleFirewallPolicyRuleConditionNetworkProtocolValues() []FirewallPolicyRuleConditionNetworkProtocol { + return []FirewallPolicyRuleConditionNetworkProtocol{FirewallPolicyRuleConditionNetworkProtocolAny, FirewallPolicyRuleConditionNetworkProtocolICMP, FirewallPolicyRuleConditionNetworkProtocolTCP, FirewallPolicyRuleConditionNetworkProtocolUDP} +} + // FlowLogFormatType enumerates the values for flow log format type. 
type FlowLogFormatType string @@ -1452,6 +1520,40 @@ func PossibleRouteNextHopTypeValues() []RouteNextHopType { return []RouteNextHopType{RouteNextHopTypeInternet, RouteNextHopTypeNone, RouteNextHopTypeVirtualAppliance, RouteNextHopTypeVirtualNetworkGateway, RouteNextHopTypeVnetLocal} } +// RuleConditionType enumerates the values for rule condition type. +type RuleConditionType string + +const ( + // RuleConditionTypeApplicationRuleCondition ... + RuleConditionTypeApplicationRuleCondition RuleConditionType = "ApplicationRuleCondition" + // RuleConditionTypeFirewallPolicyRuleCondition ... + RuleConditionTypeFirewallPolicyRuleCondition RuleConditionType = "FirewallPolicyRuleCondition" + // RuleConditionTypeNetworkRuleCondition ... + RuleConditionTypeNetworkRuleCondition RuleConditionType = "NetworkRuleCondition" +) + +// PossibleRuleConditionTypeValues returns an array of possible values for the RuleConditionType const type. +func PossibleRuleConditionTypeValues() []RuleConditionType { + return []RuleConditionType{RuleConditionTypeApplicationRuleCondition, RuleConditionTypeFirewallPolicyRuleCondition, RuleConditionTypeNetworkRuleCondition} +} + +// RuleType enumerates the values for rule type. +type RuleType string + +const ( + // RuleTypeFirewallPolicyFilterRule ... + RuleTypeFirewallPolicyFilterRule RuleType = "FirewallPolicyFilterRule" + // RuleTypeFirewallPolicyNatRule ... + RuleTypeFirewallPolicyNatRule RuleType = "FirewallPolicyNatRule" + // RuleTypeFirewallPolicyRule ... + RuleTypeFirewallPolicyRule RuleType = "FirewallPolicyRule" +) + +// PossibleRuleTypeValues returns an array of possible values for the RuleType const type. +func PossibleRuleTypeValues() []RuleType { + return []RuleType{RuleTypeFirewallPolicyFilterRule, RuleTypeFirewallPolicyNatRule, RuleTypeFirewallPolicyRule} +} + // SecurityRuleAccess enumerates the values for security rule access. type SecurityRuleAccess string @@ -3595,10 +3697,10 @@ type ApplicationGatewayOnDemandProbe struct { PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty"` // Match - Criterion for classifying a healthy probe response. Match *ApplicationGatewayProbeHealthResponseMatch `json:"match,omitempty"` - // BackendPoolName - Name of backend pool of application gateway to which probe request will be sent. - BackendPoolName *string `json:"backendPoolName,omitempty"` - // BackendHTTPSettingName - Name of backend http setting of application gateway to be used for test probe. - BackendHTTPSettingName *string `json:"backendHttpSettingName,omitempty"` + // BackendAddressPool - Reference of backend pool of application gateway to which probe request will be sent. + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + // BackendHTTPSettings - Reference of backend http setting of application gateway to be used for test probe. + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` } // ApplicationGatewayPathRule path rule of URL path map of an application gateway. @@ -4885,6 +4987,77 @@ type ApplicationGatewayWebApplicationFirewallConfiguration struct { Exclusions *[]ApplicationGatewayFirewallExclusion `json:"exclusions,omitempty"` } +// ApplicationRuleCondition rule condition of type application. +type ApplicationRuleCondition struct { + // SourceAddresses - List of source IP addresses for this rule. + SourceAddresses *[]string `json:"sourceAddresses,omitempty"` + // DestinationAddresses - List of destination IP addresses or Service Tags. 
+ DestinationAddresses *[]string `json:"destinationAddresses,omitempty"` + // Protocols - Array of Application Protocols. + Protocols *[]FirewallPolicyRuleConditionApplicationProtocol `json:"protocols,omitempty"` + // TargetFqdns - List of FQDNs for this rule condition. + TargetFqdns *[]string `json:"targetFqdns,omitempty"` + // FqdnTags - List of FQDN Tags for this rule condition. + FqdnTags *[]string `json:"fqdnTags,omitempty"` + // Name - Name of the rule condition. + Name *string `json:"name,omitempty"` + // Description - Description of the rule condition. + Description *string `json:"description,omitempty"` + // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition' + RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationRuleCondition. +func (arc ApplicationRuleCondition) MarshalJSON() ([]byte, error) { + arc.RuleConditionType = RuleConditionTypeApplicationRuleCondition + objectMap := make(map[string]interface{}) + if arc.SourceAddresses != nil { + objectMap["sourceAddresses"] = arc.SourceAddresses + } + if arc.DestinationAddresses != nil { + objectMap["destinationAddresses"] = arc.DestinationAddresses + } + if arc.Protocols != nil { + objectMap["protocols"] = arc.Protocols + } + if arc.TargetFqdns != nil { + objectMap["targetFqdns"] = arc.TargetFqdns + } + if arc.FqdnTags != nil { + objectMap["fqdnTags"] = arc.FqdnTags + } + if arc.Name != nil { + objectMap["name"] = arc.Name + } + if arc.Description != nil { + objectMap["description"] = arc.Description + } + if arc.RuleConditionType != "" { + objectMap["ruleConditionType"] = arc.RuleConditionType + } + return json.Marshal(objectMap) +} + +// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition. +func (arc ApplicationRuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) { + return &arc, true +} + +// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition. +func (arc ApplicationRuleCondition) AsRuleCondition() (*RuleCondition, bool) { + return nil, false +} + +// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition. +func (arc ApplicationRuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) { + return nil, false +} + +// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition. +func (arc ApplicationRuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) { + return &arc, true +} + // ApplicationSecurityGroup an application security group in a resource group. type ApplicationSecurityGroup struct { autorest.Response `json:"-"` @@ -6923,6 +7096,18 @@ type AzureFirewallPropertiesFormat struct { ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` // ThreatIntelMode - The operation mode for Threat Intelligence. Possible values include: 'AzureFirewallThreatIntelModeAlert', 'AzureFirewallThreatIntelModeDeny', 'AzureFirewallThreatIntelModeOff' ThreatIntelMode AzureFirewallThreatIntelMode `json:"threatIntelMode,omitempty"` + // VirtualHub - The virtualHub to which the firewall belongs. 
+ VirtualHub *SubResource `json:"virtualHub,omitempty"` + // FirewallPolicy - The firewallPolicy associated with this azure firewall. + FirewallPolicy *SubResource `json:"firewallPolicy,omitempty"` + // HubIPAddresses - READ-ONLY; IP addresses associated with AzureFirewall. + HubIPAddresses *HubIPAddresses `json:"hubIpAddresses,omitempty"` +} + +// AzureFirewallPublicIPAddress public IP Address associated with azure firewall. +type AzureFirewallPublicIPAddress struct { + // Address - Public IP Address value. + Address *string `json:"address,omitempty"` } // AzureFirewallRCAction properties of the AzureFirewallRCAction. @@ -7567,6 +7752,35 @@ func (future *BastionHostsDeleteFuture) Result(client BastionHostsClient) (ar au return } +// BastionHostsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type BastionHostsUpdateTagsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *BastionHostsUpdateTagsFuture) Result(client BastionHostsClient) (bh BastionHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.BastionHostsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bh.Response.Response, err = future.GetResult(sender); err == nil && bh.Response.Response.StatusCode != http.StatusNoContent { + bh, err = client.UpdateTagsResponder(bh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsUpdateTagsFuture", "Result", bh.Response.Response, "Failure responding to request") + } + } + return +} + // BGPCommunity contains bgp community information offered in Service Community resources. type BGPCommunity struct { // ServiceSupportedRegion - The region which the service support. e.g. For O365, region is Global. @@ -13226,18 +13440,1058 @@ type ExpressRouteServiceProviderListResult struct { NextLink *string `json:"nextLink,omitempty"` } -// ExpressRouteServiceProviderListResultIterator provides access to a complete listing of -// ExpressRouteServiceProvider values. -type ExpressRouteServiceProviderListResultIterator struct { +// ExpressRouteServiceProviderListResultIterator provides access to a complete listing of +// ExpressRouteServiceProvider values. +type ExpressRouteServiceProviderListResultIterator struct { + i int + page ExpressRouteServiceProviderListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteServiceProviderListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteServiceProviderListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteServiceProviderListResultIterator) Response() ExpressRouteServiceProviderListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteServiceProviderListResultIterator) Value() ExpressRouteServiceProvider { + if !iter.page.NotDone() { + return ExpressRouteServiceProvider{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteServiceProviderListResultIterator type. +func NewExpressRouteServiceProviderListResultIterator(page ExpressRouteServiceProviderListResultPage) ExpressRouteServiceProviderListResultIterator { + return ExpressRouteServiceProviderListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ersplr ExpressRouteServiceProviderListResult) IsEmpty() bool { + return ersplr.Value == nil || len(*ersplr.Value) == 0 +} + +// expressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ersplr ExpressRouteServiceProviderListResult) expressRouteServiceProviderListResultPreparer(ctx context.Context) (*http.Request, error) { + if ersplr.NextLink == nil || len(to.String(ersplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ersplr.NextLink))) +} + +// ExpressRouteServiceProviderListResultPage contains a page of ExpressRouteServiceProvider values. +type ExpressRouteServiceProviderListResultPage struct { + fn func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error) + ersplr ExpressRouteServiceProviderListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ersplr) + if err != nil { + return err + } + page.ersplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteServiceProviderListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteServiceProviderListResultPage) NotDone() bool { + return !page.ersplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteServiceProviderListResultPage) Response() ExpressRouteServiceProviderListResult { + return page.ersplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteServiceProviderListResultPage) Values() []ExpressRouteServiceProvider { + if page.ersplr.IsEmpty() { + return nil + } + return *page.ersplr.Value +} + +// Creates a new instance of the ExpressRouteServiceProviderListResultPage type. +func NewExpressRouteServiceProviderListResultPage(getNextPage func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)) ExpressRouteServiceProviderListResultPage { + return ExpressRouteServiceProviderListResultPage{fn: getNextPage} +} + +// ExpressRouteServiceProviderPropertiesFormat properties of ExpressRouteServiceProvider. +type ExpressRouteServiceProviderPropertiesFormat struct { + // PeeringLocations - Get a list of peering locations. + PeeringLocations *[]string `json:"peeringLocations,omitempty"` + // BandwidthsOffered - Gets bandwidths offered. + BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` + // ProvisioningState - Gets the provisioning state of the resource. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// FirewallPoliciesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type FirewallPoliciesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *FirewallPoliciesCreateOrUpdateFuture) Result(client FirewallPoliciesClient) (fp FirewallPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fp.Response.Response, err = future.GetResult(sender); err == nil && fp.Response.Response.StatusCode != http.StatusNoContent { + fp, err = client.CreateOrUpdateResponder(fp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", fp.Response.Response, "Failure responding to request") + } + } + return +} + +// FirewallPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type FirewallPoliciesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *FirewallPoliciesDeleteFuture) Result(client FirewallPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// FirewallPolicy firewallPolicy Resource. +type FirewallPolicy struct { + autorest.Response `json:"-"` + // FirewallPolicyPropertiesFormat - Properties of the firewall policy. + *FirewallPolicyPropertiesFormat `json:"properties,omitempty"` + // Etag - READ-ONLY; Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for FirewallPolicy. +func (fp FirewallPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fp.FirewallPolicyPropertiesFormat != nil { + objectMap["properties"] = fp.FirewallPolicyPropertiesFormat + } + if fp.ID != nil { + objectMap["id"] = fp.ID + } + if fp.Location != nil { + objectMap["location"] = fp.Location + } + if fp.Tags != nil { + objectMap["tags"] = fp.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FirewallPolicy struct. 
+func (fp *FirewallPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var firewallPolicyPropertiesFormat FirewallPolicyPropertiesFormat + err = json.Unmarshal(*v, &firewallPolicyPropertiesFormat) + if err != nil { + return err + } + fp.FirewallPolicyPropertiesFormat = &firewallPolicyPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fp.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fp.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + fp.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + fp.Tags = tags + } + } + } + + return nil +} + +// FirewallPolicyFilterRule firewall Policy Filter Rule +type FirewallPolicyFilterRule struct { + // Action - The action type of a Filter rule + Action *FirewallPolicyFilterRuleAction `json:"action,omitempty"` + // RuleConditions - Collection of rule conditions used by a rule. + RuleConditions *[]BasicFirewallPolicyRuleCondition `json:"ruleConditions,omitempty"` + // Name - Name of the Rule + Name *string `json:"name,omitempty"` + // Priority - Priority of the Firewall Policy Rule resource. + Priority *int32 `json:"priority,omitempty"` + // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule' + RuleType RuleType `json:"ruleType,omitempty"` +} + +// MarshalJSON is the custom marshaler for FirewallPolicyFilterRule. +func (fpfr FirewallPolicyFilterRule) MarshalJSON() ([]byte, error) { + fpfr.RuleType = RuleTypeFirewallPolicyFilterRule + objectMap := make(map[string]interface{}) + if fpfr.Action != nil { + objectMap["action"] = fpfr.Action + } + if fpfr.RuleConditions != nil { + objectMap["ruleConditions"] = fpfr.RuleConditions + } + if fpfr.Name != nil { + objectMap["name"] = fpfr.Name + } + if fpfr.Priority != nil { + objectMap["priority"] = fpfr.Priority + } + if fpfr.RuleType != "" { + objectMap["ruleType"] = fpfr.RuleType + } + return json.Marshal(objectMap) +} + +// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule. +func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) { + return nil, false +} + +// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule. +func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) { + return &fpfr, true +} + +// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule. +func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) { + return nil, false +} + +// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule. 
+func (fpfr FirewallPolicyFilterRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) { + return &fpfr, true +} + +// UnmarshalJSON is the custom unmarshaler for FirewallPolicyFilterRule struct. +func (fpfr *FirewallPolicyFilterRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "action": + if v != nil { + var action FirewallPolicyFilterRuleAction + err = json.Unmarshal(*v, &action) + if err != nil { + return err + } + fpfr.Action = &action + } + case "ruleConditions": + if v != nil { + ruleConditions, err := unmarshalBasicFirewallPolicyRuleConditionArray(*v) + if err != nil { + return err + } + fpfr.RuleConditions = &ruleConditions + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fpfr.Name = &name + } + case "priority": + if v != nil { + var priority int32 + err = json.Unmarshal(*v, &priority) + if err != nil { + return err + } + fpfr.Priority = &priority + } + case "ruleType": + if v != nil { + var ruleType RuleType + err = json.Unmarshal(*v, &ruleType) + if err != nil { + return err + } + fpfr.RuleType = ruleType + } + } + } + + return nil +} + +// FirewallPolicyFilterRuleAction properties of the FirewallPolicyFilterRuleAction. +type FirewallPolicyFilterRuleAction struct { + // Type - The type of action. Possible values include: 'FirewallPolicyFilterRuleActionTypeAllow', 'FirewallPolicyFilterRuleActionTypeDeny', 'FirewallPolicyFilterRuleActionTypeAlert' + Type FirewallPolicyFilterRuleActionType `json:"type,omitempty"` +} + +// FirewallPolicyListResult response for ListFirewallPolicies API service call. +type FirewallPolicyListResult struct { + autorest.Response `json:"-"` + // Value - List of Firewall Policies in a resource group. + Value *[]FirewallPolicy `json:"value,omitempty"` + // NextLink - URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// FirewallPolicyListResultIterator provides access to a complete listing of FirewallPolicy values. +type FirewallPolicyListResultIterator struct { + i int + page FirewallPolicyListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *FirewallPolicyListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *FirewallPolicyListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
+func (iter FirewallPolicyListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter FirewallPolicyListResultIterator) Response() FirewallPolicyListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter FirewallPolicyListResultIterator) Value() FirewallPolicy { + if !iter.page.NotDone() { + return FirewallPolicy{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the FirewallPolicyListResultIterator type. +func NewFirewallPolicyListResultIterator(page FirewallPolicyListResultPage) FirewallPolicyListResultIterator { + return FirewallPolicyListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (fplr FirewallPolicyListResult) IsEmpty() bool { + return fplr.Value == nil || len(*fplr.Value) == 0 +} + +// firewallPolicyListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (fplr FirewallPolicyListResult) firewallPolicyListResultPreparer(ctx context.Context) (*http.Request, error) { + if fplr.NextLink == nil || len(to.String(fplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(fplr.NextLink))) +} + +// FirewallPolicyListResultPage contains a page of FirewallPolicy values. +type FirewallPolicyListResultPage struct { + fn func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error) + fplr FirewallPolicyListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *FirewallPolicyListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.fplr) + if err != nil { + return err + } + page.fplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *FirewallPolicyListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page FirewallPolicyListResultPage) NotDone() bool { + return !page.fplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page FirewallPolicyListResultPage) Response() FirewallPolicyListResult { + return page.fplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page FirewallPolicyListResultPage) Values() []FirewallPolicy { + if page.fplr.IsEmpty() { + return nil + } + return *page.fplr.Value +} + +// Creates a new instance of the FirewallPolicyListResultPage type. 
+func NewFirewallPolicyListResultPage(getNextPage func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error)) FirewallPolicyListResultPage { + return FirewallPolicyListResultPage{fn: getNextPage} +} + +// FirewallPolicyNatRule firewall Policy NAT Rule +type FirewallPolicyNatRule struct { + // Action - The action type of a Nat rule, SNAT or DNAT + Action *FirewallPolicyNatRuleAction `json:"action,omitempty"` + // TranslatedAddress - The translated address for this NAT rule. + TranslatedAddress *string `json:"translatedAddress,omitempty"` + // TranslatedPort - The translated port for this NAT rule. + TranslatedPort *string `json:"translatedPort,omitempty"` + // RuleCondition - The match conditions for incoming traffic + RuleCondition BasicFirewallPolicyRuleCondition `json:"ruleCondition,omitempty"` + // Name - Name of the Rule + Name *string `json:"name,omitempty"` + // Priority - Priority of the Firewall Policy Rule resource. + Priority *int32 `json:"priority,omitempty"` + // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule' + RuleType RuleType `json:"ruleType,omitempty"` +} + +// MarshalJSON is the custom marshaler for FirewallPolicyNatRule. +func (fpnr FirewallPolicyNatRule) MarshalJSON() ([]byte, error) { + fpnr.RuleType = RuleTypeFirewallPolicyNatRule + objectMap := make(map[string]interface{}) + if fpnr.Action != nil { + objectMap["action"] = fpnr.Action + } + if fpnr.TranslatedAddress != nil { + objectMap["translatedAddress"] = fpnr.TranslatedAddress + } + if fpnr.TranslatedPort != nil { + objectMap["translatedPort"] = fpnr.TranslatedPort + } + objectMap["ruleCondition"] = fpnr.RuleCondition + if fpnr.Name != nil { + objectMap["name"] = fpnr.Name + } + if fpnr.Priority != nil { + objectMap["priority"] = fpnr.Priority + } + if fpnr.RuleType != "" { + objectMap["ruleType"] = fpnr.RuleType + } + return json.Marshal(objectMap) +} + +// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule. +func (fpnr FirewallPolicyNatRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) { + return &fpnr, true +} + +// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule. +func (fpnr FirewallPolicyNatRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) { + return nil, false +} + +// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule. +func (fpnr FirewallPolicyNatRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) { + return nil, false +} + +// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule. +func (fpnr FirewallPolicyNatRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) { + return &fpnr, true +} + +// UnmarshalJSON is the custom unmarshaler for FirewallPolicyNatRule struct. 
+func (fpnr *FirewallPolicyNatRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "action": + if v != nil { + var action FirewallPolicyNatRuleAction + err = json.Unmarshal(*v, &action) + if err != nil { + return err + } + fpnr.Action = &action + } + case "translatedAddress": + if v != nil { + var translatedAddress string + err = json.Unmarshal(*v, &translatedAddress) + if err != nil { + return err + } + fpnr.TranslatedAddress = &translatedAddress + } + case "translatedPort": + if v != nil { + var translatedPort string + err = json.Unmarshal(*v, &translatedPort) + if err != nil { + return err + } + fpnr.TranslatedPort = &translatedPort + } + case "ruleCondition": + if v != nil { + ruleCondition, err := unmarshalBasicFirewallPolicyRuleCondition(*v) + if err != nil { + return err + } + fpnr.RuleCondition = ruleCondition + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fpnr.Name = &name + } + case "priority": + if v != nil { + var priority int32 + err = json.Unmarshal(*v, &priority) + if err != nil { + return err + } + fpnr.Priority = &priority + } + case "ruleType": + if v != nil { + var ruleType RuleType + err = json.Unmarshal(*v, &ruleType) + if err != nil { + return err + } + fpnr.RuleType = ruleType + } + } + } + + return nil +} + +// FirewallPolicyNatRuleAction properties of the FirewallPolicyNatRuleAction. +type FirewallPolicyNatRuleAction struct { + // Type - The type of action. Possible values include: 'DNAT', 'SNAT' + Type FirewallPolicyNatRuleActionType `json:"type,omitempty"` +} + +// FirewallPolicyPropertiesFormat firewall Policy definition +type FirewallPolicyPropertiesFormat struct { + // RuleGroups - READ-ONLY; List of references to FirewallPolicyRuleGroups + RuleGroups *[]SubResource `json:"ruleGroups,omitempty"` + // ProvisioningState - The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // BasePolicy - The parent firewall policy from which rules are inherited. + BasePolicy *SubResource `json:"basePolicy,omitempty"` + // Firewalls - READ-ONLY; List of references to Azure Firewalls that this Firewall Policy is associated with + Firewalls *[]SubResource `json:"firewalls,omitempty"` + // ChildPolicies - READ-ONLY; List of references to Child Firewall Policies + ChildPolicies *[]SubResource `json:"childPolicies,omitempty"` + // ThreatIntelMode - The operation mode for Threat Intelligence. Possible values include: 'AzureFirewallThreatIntelModeAlert', 'AzureFirewallThreatIntelModeDeny', 'AzureFirewallThreatIntelModeOff' + ThreatIntelMode AzureFirewallThreatIntelMode `json:"threatIntelMode,omitempty"` +} + +// BasicFirewallPolicyRule properties of the rule. +type BasicFirewallPolicyRule interface { + AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) + AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) + AsFirewallPolicyRule() (*FirewallPolicyRule, bool) +} + +// FirewallPolicyRule properties of the rule. +type FirewallPolicyRule struct { + // Name - Name of the Rule + Name *string `json:"name,omitempty"` + // Priority - Priority of the Firewall Policy Rule resource. 
+ Priority *int32 `json:"priority,omitempty"` + // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule' + RuleType RuleType `json:"ruleType,omitempty"` +} + +func unmarshalBasicFirewallPolicyRule(body []byte) (BasicFirewallPolicyRule, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["ruleType"] { + case string(RuleTypeFirewallPolicyNatRule): + var fpnr FirewallPolicyNatRule + err := json.Unmarshal(body, &fpnr) + return fpnr, err + case string(RuleTypeFirewallPolicyFilterRule): + var fpfr FirewallPolicyFilterRule + err := json.Unmarshal(body, &fpfr) + return fpfr, err + default: + var fpr FirewallPolicyRule + err := json.Unmarshal(body, &fpr) + return fpr, err + } +} +func unmarshalBasicFirewallPolicyRuleArray(body []byte) ([]BasicFirewallPolicyRule, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + fprArray := make([]BasicFirewallPolicyRule, len(rawMessages)) + + for index, rawMessage := range rawMessages { + fpr, err := unmarshalBasicFirewallPolicyRule(*rawMessage) + if err != nil { + return nil, err + } + fprArray[index] = fpr + } + return fprArray, nil +} + +// MarshalJSON is the custom marshaler for FirewallPolicyRule. +func (fpr FirewallPolicyRule) MarshalJSON() ([]byte, error) { + fpr.RuleType = RuleTypeFirewallPolicyRule + objectMap := make(map[string]interface{}) + if fpr.Name != nil { + objectMap["name"] = fpr.Name + } + if fpr.Priority != nil { + objectMap["priority"] = fpr.Priority + } + if fpr.RuleType != "" { + objectMap["ruleType"] = fpr.RuleType + } + return json.Marshal(objectMap) +} + +// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule. +func (fpr FirewallPolicyRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) { + return nil, false +} + +// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule. +func (fpr FirewallPolicyRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) { + return nil, false +} + +// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule. +func (fpr FirewallPolicyRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) { + return &fpr, true +} + +// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule. +func (fpr FirewallPolicyRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) { + return &fpr, true +} + +// BasicFirewallPolicyRuleCondition properties of a rule. +type BasicFirewallPolicyRuleCondition interface { + AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) + AsRuleCondition() (*RuleCondition, bool) + AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) +} + +// FirewallPolicyRuleCondition properties of a rule. +type FirewallPolicyRuleCondition struct { + // Name - Name of the rule condition. + Name *string `json:"name,omitempty"` + // Description - Description of the rule condition. 
+ Description *string `json:"description,omitempty"` + // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition' + RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"` +} + +func unmarshalBasicFirewallPolicyRuleCondition(body []byte) (BasicFirewallPolicyRuleCondition, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["ruleConditionType"] { + case string(RuleConditionTypeApplicationRuleCondition): + var arc ApplicationRuleCondition + err := json.Unmarshal(body, &arc) + return arc, err + case string(RuleConditionTypeNetworkRuleCondition): + var rc RuleCondition + err := json.Unmarshal(body, &rc) + return rc, err + default: + var fprc FirewallPolicyRuleCondition + err := json.Unmarshal(body, &fprc) + return fprc, err + } +} +func unmarshalBasicFirewallPolicyRuleConditionArray(body []byte) ([]BasicFirewallPolicyRuleCondition, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + fprcArray := make([]BasicFirewallPolicyRuleCondition, len(rawMessages)) + + for index, rawMessage := range rawMessages { + fprc, err := unmarshalBasicFirewallPolicyRuleCondition(*rawMessage) + if err != nil { + return nil, err + } + fprcArray[index] = fprc + } + return fprcArray, nil +} + +// MarshalJSON is the custom marshaler for FirewallPolicyRuleCondition. +func (fprc FirewallPolicyRuleCondition) MarshalJSON() ([]byte, error) { + fprc.RuleConditionType = RuleConditionTypeFirewallPolicyRuleCondition + objectMap := make(map[string]interface{}) + if fprc.Name != nil { + objectMap["name"] = fprc.Name + } + if fprc.Description != nil { + objectMap["description"] = fprc.Description + } + if fprc.RuleConditionType != "" { + objectMap["ruleConditionType"] = fprc.RuleConditionType + } + return json.Marshal(objectMap) +} + +// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition. +func (fprc FirewallPolicyRuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) { + return nil, false +} + +// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition. +func (fprc FirewallPolicyRuleCondition) AsRuleCondition() (*RuleCondition, bool) { + return nil, false +} + +// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition. +func (fprc FirewallPolicyRuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) { + return &fprc, true +} + +// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition. +func (fprc FirewallPolicyRuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) { + return &fprc, true +} + +// FirewallPolicyRuleConditionApplicationProtocol properties of the application rule protocol. +type FirewallPolicyRuleConditionApplicationProtocol struct { + // ProtocolType - Protocol type. Possible values include: 'FirewallPolicyRuleConditionApplicationProtocolTypeHTTP', 'FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS' + ProtocolType FirewallPolicyRuleConditionApplicationProtocolType `json:"protocolType,omitempty"` + // Port - Port number for the protocol, cannot be greater than 64000. 
+ Port *int32 `json:"port,omitempty"` +} + +// FirewallPolicyRuleGroup rule Group resource +type FirewallPolicyRuleGroup struct { + autorest.Response `json:"-"` + // FirewallPolicyRuleGroupProperties - The properties of the firewall policy rule group. + *FirewallPolicyRuleGroupProperties `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - READ-ONLY; Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // Type - READ-ONLY; Rule Group type. + Type *string `json:"type,omitempty"` + // ID - Resource ID. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for FirewallPolicyRuleGroup. +func (fprg FirewallPolicyRuleGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fprg.FirewallPolicyRuleGroupProperties != nil { + objectMap["properties"] = fprg.FirewallPolicyRuleGroupProperties + } + if fprg.Name != nil { + objectMap["name"] = fprg.Name + } + if fprg.ID != nil { + objectMap["id"] = fprg.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FirewallPolicyRuleGroup struct. +func (fprg *FirewallPolicyRuleGroup) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var firewallPolicyRuleGroupProperties FirewallPolicyRuleGroupProperties + err = json.Unmarshal(*v, &firewallPolicyRuleGroupProperties) + if err != nil { + return err + } + fprg.FirewallPolicyRuleGroupProperties = &firewallPolicyRuleGroupProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fprg.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fprg.Etag = &etag + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fprg.Type = &typeVar + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fprg.ID = &ID + } + } + } + + return nil +} + +// FirewallPolicyRuleGroupListResult response for ListFirewallPolicyRuleGroups API service call. +type FirewallPolicyRuleGroupListResult struct { + autorest.Response `json:"-"` + // Value - List of FirewallPolicyRuleGroups in a FirewallPolicy. + Value *[]FirewallPolicyRuleGroup `json:"value,omitempty"` + // NextLink - URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// FirewallPolicyRuleGroupListResultIterator provides access to a complete listing of +// FirewallPolicyRuleGroup values. +type FirewallPolicyRuleGroupListResultIterator struct { i int - page ExpressRouteServiceProviderListResultPage + page FirewallPolicyRuleGroupListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. 
-func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *FirewallPolicyRuleGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -13262,62 +14516,62 @@ func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx c // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *ExpressRouteServiceProviderListResultIterator) Next() error { +func (iter *FirewallPolicyRuleGroupListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ExpressRouteServiceProviderListResultIterator) NotDone() bool { +func (iter FirewallPolicyRuleGroupListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ExpressRouteServiceProviderListResultIterator) Response() ExpressRouteServiceProviderListResult { +func (iter FirewallPolicyRuleGroupListResultIterator) Response() FirewallPolicyRuleGroupListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter ExpressRouteServiceProviderListResultIterator) Value() ExpressRouteServiceProvider { +func (iter FirewallPolicyRuleGroupListResultIterator) Value() FirewallPolicyRuleGroup { if !iter.page.NotDone() { - return ExpressRouteServiceProvider{} + return FirewallPolicyRuleGroup{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ExpressRouteServiceProviderListResultIterator type. -func NewExpressRouteServiceProviderListResultIterator(page ExpressRouteServiceProviderListResultPage) ExpressRouteServiceProviderListResultIterator { - return ExpressRouteServiceProviderListResultIterator{page: page} +// Creates a new instance of the FirewallPolicyRuleGroupListResultIterator type. +func NewFirewallPolicyRuleGroupListResultIterator(page FirewallPolicyRuleGroupListResultPage) FirewallPolicyRuleGroupListResultIterator { + return FirewallPolicyRuleGroupListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (ersplr ExpressRouteServiceProviderListResult) IsEmpty() bool { - return ersplr.Value == nil || len(*ersplr.Value) == 0 +func (fprglr FirewallPolicyRuleGroupListResult) IsEmpty() bool { + return fprglr.Value == nil || len(*fprglr.Value) == 0 } -// expressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. +// firewallPolicyRuleGroupListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. 
-func (ersplr ExpressRouteServiceProviderListResult) expressRouteServiceProviderListResultPreparer(ctx context.Context) (*http.Request, error) { - if ersplr.NextLink == nil || len(to.String(ersplr.NextLink)) < 1 { +func (fprglr FirewallPolicyRuleGroupListResult) firewallPolicyRuleGroupListResultPreparer(ctx context.Context) (*http.Request, error) { + if fprglr.NextLink == nil || len(to.String(fprglr.NextLink)) < 1 { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(ersplr.NextLink))) + autorest.WithBaseURL(to.String(fprglr.NextLink))) } -// ExpressRouteServiceProviderListResultPage contains a page of ExpressRouteServiceProvider values. -type ExpressRouteServiceProviderListResultPage struct { - fn func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error) - ersplr ExpressRouteServiceProviderListResult +// FirewallPolicyRuleGroupListResultPage contains a page of FirewallPolicyRuleGroup values. +type FirewallPolicyRuleGroupListResultPage struct { + fn func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error) + fprglr FirewallPolicyRuleGroupListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *FirewallPolicyRuleGroupListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -13326,52 +14580,145 @@ func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx conte tracing.EndSpan(ctx, sc, err) }() } - next, err := page.fn(ctx, page.ersplr) + next, err := page.fn(ctx, page.fprglr) if err != nil { return err } - page.ersplr = next + page.fprglr = next return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *ExpressRouteServiceProviderListResultPage) Next() error { +func (page *FirewallPolicyRuleGroupListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ExpressRouteServiceProviderListResultPage) NotDone() bool { - return !page.ersplr.IsEmpty() +func (page FirewallPolicyRuleGroupListResultPage) NotDone() bool { + return !page.fprglr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page ExpressRouteServiceProviderListResultPage) Response() ExpressRouteServiceProviderListResult { - return page.ersplr +func (page FirewallPolicyRuleGroupListResultPage) Response() FirewallPolicyRuleGroupListResult { + return page.fprglr } // Values returns the slice of values for the current page or nil if there are no values. 
-func (page ExpressRouteServiceProviderListResultPage) Values() []ExpressRouteServiceProvider { - if page.ersplr.IsEmpty() { +func (page FirewallPolicyRuleGroupListResultPage) Values() []FirewallPolicyRuleGroup { + if page.fprglr.IsEmpty() { return nil } - return *page.ersplr.Value + return *page.fprglr.Value } -// Creates a new instance of the ExpressRouteServiceProviderListResultPage type. -func NewExpressRouteServiceProviderListResultPage(getNextPage func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)) ExpressRouteServiceProviderListResultPage { - return ExpressRouteServiceProviderListResultPage{fn: getNextPage} +// Creates a new instance of the FirewallPolicyRuleGroupListResultPage type. +func NewFirewallPolicyRuleGroupListResultPage(getNextPage func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error)) FirewallPolicyRuleGroupListResultPage { + return FirewallPolicyRuleGroupListResultPage{fn: getNextPage} } -// ExpressRouteServiceProviderPropertiesFormat properties of ExpressRouteServiceProvider. -type ExpressRouteServiceProviderPropertiesFormat struct { - // PeeringLocations - Get a list of peering locations. - PeeringLocations *[]string `json:"peeringLocations,omitempty"` - // BandwidthsOffered - Gets bandwidths offered. - BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` - // ProvisioningState - Gets the provisioning state of the resource. - ProvisioningState *string `json:"provisioningState,omitempty"` +// FirewallPolicyRuleGroupProperties properties of the rule group. +type FirewallPolicyRuleGroupProperties struct { + // Priority - Priority of the Firewall Policy Rule Group resource. + Priority *int32 `json:"priority,omitempty"` + // Rules - Group of Firewall Policy rules. + Rules *[]BasicFirewallPolicyRule `json:"rules,omitempty"` + // ProvisioningState - The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for FirewallPolicyRuleGroupProperties struct. +func (fprgp *FirewallPolicyRuleGroupProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "priority": + if v != nil { + var priority int32 + err = json.Unmarshal(*v, &priority) + if err != nil { + return err + } + fprgp.Priority = &priority + } + case "rules": + if v != nil { + rules, err := unmarshalBasicFirewallPolicyRuleArray(*v) + if err != nil { + return err + } + fprgp.Rules = &rules + } + case "provisioningState": + if v != nil { + var provisioningState ProvisioningState + err = json.Unmarshal(*v, &provisioningState) + if err != nil { + return err + } + fprgp.ProvisioningState = provisioningState + } + } + } + + return nil +} + +// FirewallPolicyRuleGroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type FirewallPolicyRuleGroupsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *FirewallPolicyRuleGroupsCreateOrUpdateFuture) Result(client FirewallPolicyRuleGroupsClient) (fprg FirewallPolicyRuleGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fprg.Response.Response, err = future.GetResult(sender); err == nil && fprg.Response.Response.StatusCode != http.StatusNoContent { + fprg, err = client.CreateOrUpdateResponder(fprg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", fprg.Response.Response, "Failure responding to request") + } + } + return +} + +// FirewallPolicyRuleGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type FirewallPolicyRuleGroupsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *FirewallPolicyRuleGroupsDeleteFuture) Result(client FirewallPolicyRuleGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return } // FlowLogFormatParameters parameters that define the flow log format. @@ -13653,6 +15000,14 @@ type HTTPHeader struct { Value *string `json:"value,omitempty"` } +// HubIPAddresses IP addresses associated with azure firewall. +type HubIPAddresses struct { + // PublicIPAddresses - List of Public IP addresses associated with azure firewall. + PublicIPAddresses *[]AzureFirewallPublicIPAddress `json:"publicIPAddresses,omitempty"` + // PrivateIPAddress - Private IP Address associated with azure firewall. + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` +} + // HubVirtualNetworkConnection hubVirtualNetworkConnection Resource. type HubVirtualNetworkConnection struct { autorest.Response `json:"-"` @@ -21115,7 +22470,7 @@ type PrivateEndpoint struct { autorest.Response `json:"-"` // PrivateEndpointProperties - Properties of the private endpoint. *PrivateEndpointProperties `json:"properties,omitempty"` - // Etag - Gets a unique read-only string that changes whenever the resource is updated. + // Etag - A unique read-only string that changes whenever the resource is updated. Etag *string `json:"etag,omitempty"` // ID - Resource ID. ID *string `json:"id,omitempty"` @@ -21235,6 +22590,10 @@ type PrivateEndpointConnection struct { *PrivateEndpointConnectionProperties `json:"properties,omitempty"` // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The resource type. 
+ Type *string `json:"type,omitempty"` + // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` // ID - Resource ID. ID *string `json:"id,omitempty"` } @@ -21281,6 +22640,24 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { } pec.Name = &name } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + pec.Type = &typeVar + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + pec.Etag = &etag + } case "id": if v != nil { var ID string @@ -21302,6 +22679,8 @@ type PrivateEndpointConnectionProperties struct { PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider. PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` + // ProvisioningState - The provisioning state of the private endpoint connection. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` } // PrivateEndpointListResult response for the ListPrivateEndpoints API service call. @@ -21456,8 +22835,8 @@ type PrivateEndpointProperties struct { Subnet *Subnet `json:"subnet,omitempty"` // NetworkInterfaces - READ-ONLY; Gets an array of references to the network interfaces created for this private endpoint. NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state of the private endpoint. Possible values are: 'Updating', 'Deleting', and 'Failed'. - ProvisioningState *string `json:"provisioningState,omitempty"` + // ProvisioningState - The provisioning state of the private endpoint. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` // PrivateLinkServiceConnections - A grouping of information about the connection to the remote resource. PrivateLinkServiceConnections *[]PrivateLinkServiceConnection `json:"privateLinkServiceConnections,omitempty"` // ManualPrivateLinkServiceConnections - A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource. @@ -21521,7 +22900,7 @@ type PrivateLinkService struct { autorest.Response `json:"-"` // PrivateLinkServiceProperties - Properties of the private link service. *PrivateLinkServiceProperties `json:"properties,omitempty"` - // Etag - Gets a unique read-only string that changes whenever the resource is updated. + // Etag - A unique read-only string that changes whenever the resource is updated. Etag *string `json:"etag,omitempty"` // ID - Resource ID. ID *string `json:"id,omitempty"` @@ -21640,6 +23019,10 @@ type PrivateLinkServiceConnection struct { *PrivateLinkServiceConnectionProperties `json:"properties,omitempty"` // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The resource type. + Type *string `json:"type,omitempty"` + // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated. 
+ Etag *string `json:"etag,omitempty"` // ID - Resource ID. ID *string `json:"id,omitempty"` } @@ -21686,6 +23069,24 @@ func (plsc *PrivateLinkServiceConnection) UnmarshalJSON(body []byte) error { } plsc.Name = &name } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + plsc.Type = &typeVar + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + plsc.Etag = &etag + } case "id": if v != nil { var ID string @@ -21703,6 +23104,8 @@ func (plsc *PrivateLinkServiceConnection) UnmarshalJSON(body []byte) error { // PrivateLinkServiceConnectionProperties properties of the PrivateLinkServiceConnection. type PrivateLinkServiceConnectionProperties struct { + // ProvisioningState - The provisioning state of the private link service connection. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` // PrivateLinkServiceID - The resource id of private link service. PrivateLinkServiceID *string `json:"privateLinkServiceId,omitempty"` // GroupIds - The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to. @@ -21730,6 +23133,12 @@ type PrivateLinkServiceIPConfiguration struct { *PrivateLinkServiceIPConfigurationProperties `json:"properties,omitempty"` // Name - The name of private link service ip configuration. Name *string `json:"name,omitempty"` + // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // Type - READ-ONLY; The resource type. + Type *string `json:"type,omitempty"` + // ID - Resource ID. + ID *string `json:"id,omitempty"` } // MarshalJSON is the custom marshaler for PrivateLinkServiceIPConfiguration. @@ -21741,6 +23150,9 @@ func (plsic PrivateLinkServiceIPConfiguration) MarshalJSON() ([]byte, error) { if plsic.Name != nil { objectMap["name"] = plsic.Name } + if plsic.ID != nil { + objectMap["id"] = plsic.ID + } return json.Marshal(objectMap) } @@ -21771,6 +23183,33 @@ func (plsic *PrivateLinkServiceIPConfiguration) UnmarshalJSON(body []byte) error } plsic.Name = &name } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + plsic.Etag = &etag + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + plsic.Type = &typeVar + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + plsic.ID = &ID + } } } @@ -21785,10 +23224,10 @@ type PrivateLinkServiceIPConfigurationProperties struct { PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` // Subnet - The reference of the subnet resource. Subnet *Subnet `json:"subnet,omitempty"` - // PublicIPAddress - The reference of the public IP resource. - PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` - // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. - ProvisioningState *string `json:"provisioningState,omitempty"` + // Primary - Whether the ip configuration is primary or not. + Primary *bool `json:"primary,omitempty"` + // ProvisioningState - The provisioning state of the private link service ip configuration. 
Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` // PrivateIPAddressVersion - Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values include: 'IPv4', 'IPv6' PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` } @@ -21947,8 +23386,8 @@ type PrivateLinkServiceProperties struct { IPConfigurations *[]PrivateLinkServiceIPConfiguration `json:"ipConfigurations,omitempty"` // NetworkInterfaces - READ-ONLY; Gets an array of references to the network interfaces created for this private link service. NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state of the private link service. Possible values are: 'Updating', 'Succeeded', and 'Failed'. - ProvisioningState *string `json:"provisioningState,omitempty"` + // ProvisioningState - The provisioning state of the private link service. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` // PrivateEndpointConnections - An array of list about connections to the private endpoint. PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` // Visibility - The visibility list of the private link service. @@ -23283,6 +24722,8 @@ type PublicIPPrefixPropertiesFormat struct { IPPrefix *string `json:"ipPrefix,omitempty"` // PublicIPAddresses - The list of all referenced PublicIPAddresses. PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"` + // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix. + LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"` // ResourceGUID - The resource GUID property of the public IP prefix resource. ResourceGUID *string `json:"resourceGuid,omitempty"` // ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. @@ -24792,6 +26233,72 @@ func (future *RouteTablesUpdateTagsFuture) Result(client RouteTablesClient) (rt return } +// RuleCondition rule condition of type network +type RuleCondition struct { + // IPProtocols - Array of FirewallPolicyRuleConditionNetworkProtocols. + IPProtocols *[]FirewallPolicyRuleConditionNetworkProtocol `json:"ipProtocols,omitempty"` + // SourceAddresses - List of source IP addresses for this rule. + SourceAddresses *[]string `json:"sourceAddresses,omitempty"` + // DestinationAddresses - List of destination IP addresses or Service Tags. + DestinationAddresses *[]string `json:"destinationAddresses,omitempty"` + // DestinationPorts - List of destination ports. + DestinationPorts *[]string `json:"destinationPorts,omitempty"` + // Name - Name of the rule condition. + Name *string `json:"name,omitempty"` + // Description - Description of the rule condition. + Description *string `json:"description,omitempty"` + // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition' + RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"` +} + +// MarshalJSON is the custom marshaler for RuleCondition. 
+func (rc RuleCondition) MarshalJSON() ([]byte, error) { + rc.RuleConditionType = RuleConditionTypeNetworkRuleCondition + objectMap := make(map[string]interface{}) + if rc.IPProtocols != nil { + objectMap["ipProtocols"] = rc.IPProtocols + } + if rc.SourceAddresses != nil { + objectMap["sourceAddresses"] = rc.SourceAddresses + } + if rc.DestinationAddresses != nil { + objectMap["destinationAddresses"] = rc.DestinationAddresses + } + if rc.DestinationPorts != nil { + objectMap["destinationPorts"] = rc.DestinationPorts + } + if rc.Name != nil { + objectMap["name"] = rc.Name + } + if rc.Description != nil { + objectMap["description"] = rc.Description + } + if rc.RuleConditionType != "" { + objectMap["ruleConditionType"] = rc.RuleConditionType + } + return json.Marshal(objectMap) +} + +// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition. +func (rc RuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) { + return nil, false +} + +// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition. +func (rc RuleCondition) AsRuleCondition() (*RuleCondition, bool) { + return &rc, true +} + +// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition. +func (rc RuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) { + return nil, false +} + +// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition. +func (rc RuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) { + return &rc, true +} + // SecurityGroup networkSecurityGroup resource. type SecurityGroup struct { autorest.Response `json:"-"` @@ -26709,6 +28216,29 @@ func (future *SubnetsPrepareNetworkPoliciesFuture) Result(client SubnetsClient) return } +// SubnetsUnprepareNetworkPoliciesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type SubnetsUnprepareNetworkPoliciesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SubnetsUnprepareNetworkPoliciesFuture) Result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsUnprepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsUnprepareNetworkPoliciesFuture") + return + } + ar.Response = future.Response() + return +} + // SubResource reference to another subresource. type SubResource struct { // ID - Resource ID. @@ -26909,6 +28439,12 @@ type TunnelConnectionHealth struct { LastConnectionEstablishedUtcTime *string `json:"lastConnectionEstablishedUtcTime,omitempty"` } +// UnprepareNetworkPoliciesRequest details of UnprepareNetworkPolicies for Subnet. +type UnprepareNetworkPoliciesRequest struct { + // ServiceName - The name of the service for which subnet is being unprepared for. + ServiceName *string `json:"serviceName,omitempty"` +} + // Usage describes network resource usage. type Usage struct { // ID - READ-ONLY; Resource identifier. 
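The models.go hunk above introduces the polymorphic BasicFirewallPolicyRule / BasicFirewallPolicyRuleCondition hierarchy, whose custom marshalers stamp the discriminator field and whose unmarshal helpers dispatch on it. A minimal sketch of how a consumer of the updated SDK might rely on that behavior, assuming the canonical azure-sdk-for-go import path; the concrete addresses and names are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
)

func main() {
	name := "allow-dns"
	desc := "Allow DNS to the internal resolver"
	src := []string{"10.0.0.0/24"}
	dst := []string{"10.0.0.4"}
	ports := []string{"53"}

	// RuleCondition.MarshalJSON stamps ruleConditionType with
	// RuleConditionTypeNetworkRuleCondition, so the payload can later be
	// round-tripped through the polymorphic rule-condition unmarshaling.
	rc := network.RuleCondition{
		Name:                 &name,
		Description:          &desc,
		SourceAddresses:      &src,
		DestinationAddresses: &dst,
		DestinationPorts:     &ports,
	}

	b, err := json.Marshal(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
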
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privatelinkservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privatelinkservices.go index 4c31537a0f2..98ed330c4f6 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privatelinkservices.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privatelinkservices.go @@ -1019,6 +1019,8 @@ func (client PrivateLinkServicesClient) UpdatePrivateEndpointConnectionPreparer( "api-version": APIVersion, } + parameters.Type = nil + parameters.Etag = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/servicetags.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/servicetags.go index 962c21b801e..f7d39cf5565 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/servicetags.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/servicetags.go @@ -42,7 +42,9 @@ func NewServiceTagsClientWithBaseURI(baseURI string, subscriptionID string) Serv // List gets a list of service tag information resources. // Parameters: -// location - the location. +// location - the location that will be used as a reference for version (not as a filter based on location, you +// will get the list of service tags with prefix details across all regions but limited to the cloud that your +// subscription belongs to). func (client ServiceTagsClient) List(ctx context.Context, location string) (result ServiceTagsListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServiceTagsClient.List") diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/subnets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/subnets.go index ec9af9af3a9..a2f917cad06 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/subnets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/subnets.go @@ -479,3 +479,85 @@ func (client SubnetsClient) PrepareNetworkPoliciesResponder(resp *http.Response) result.Response = resp return } + +// UnprepareNetworkPolicies unprepares a subnet by removing network intent policies. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. +// subnetName - the name of the subnet. +// unprepareNetworkPoliciesRequestParameters - parameters supplied to unprepare subnet to remove network intent +// policies. 
+func (client SubnetsClient) UnprepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters UnprepareNetworkPoliciesRequest) (result SubnetsUnprepareNetworkPoliciesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.UnprepareNetworkPolicies") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UnprepareNetworkPoliciesPreparer(ctx, resourceGroupName, virtualNetworkName, subnetName, unprepareNetworkPoliciesRequestParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "UnprepareNetworkPolicies", nil, "Failure preparing request") + return + } + + result, err = client.UnprepareNetworkPoliciesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "UnprepareNetworkPolicies", result.Response(), "Failure sending request") + return + } + + return +} + +// UnprepareNetworkPoliciesPreparer prepares the UnprepareNetworkPolicies request. +func (client SubnetsClient) UnprepareNetworkPoliciesPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters UnprepareNetworkPoliciesRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies", pathParameters), + autorest.WithJSON(unprepareNetworkPoliciesRequestParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UnprepareNetworkPoliciesSender sends the UnprepareNetworkPolicies request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) UnprepareNetworkPoliciesSender(req *http.Request) (future SubnetsUnprepareNetworkPoliciesFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UnprepareNetworkPoliciesResponder handles the response to the UnprepareNetworkPolicies request. The method always +// closes the http.Response Body. 
+func (client SubnetsClient) UnprepareNetworkPoliciesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go index 35951876016..7875cf5ea19 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go @@ -734,7 +734,7 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsResponder(resp *h err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go index 97e2ee41957..cd31f47b633 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go @@ -1542,7 +1542,7 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsResponder(resp *http.Respon err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go index fd39477999e..d37d3a30a93 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go @@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} } +// CalculateTemplateHash calculate the hash of the given template. +// Parameters: +// templateParameter - the template provided to calculate hash. 
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request") + return + } + + resp, err := client.CalculateTemplateHashSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request") + return + } + + result, err = client.CalculateTemplateHashResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request") + } + + return +} + +// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request. +func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) { + const APIVersion = "2017-05-10" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"), + autorest.WithJSON(templateParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is // canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running // template deployment and leaves the resource group partially deployed. 
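The CalculateTemplateHash operation added above accepts an arbitrary template body as interface{} and POSTs it to the calculateTemplateHash endpoint. A minimal usage sketch, assuming the canonical SDK import path and an environment-based autorest authorizer; the subscription ID and template body are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// NewDeploymentsClient follows the usual autorest client conventions.
	client := resources.NewDeploymentsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Any ARM template document can be hashed before it is deployed.
	template := map[string]interface{}{
		"$schema":        "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
		"contentVersion": "1.0.0.0",
		"resources":      []interface{}{},
	}

	result, err := client.CalculateTemplateHash(context.Background(), template)
	if err != nil {
		log.Fatal(err)
	}
	if result.TemplateHash != nil {
		fmt.Println("template hash:", *result.TemplateHash)
	}
}
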
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/models.go index 3ad29548688..ca3a353a91f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/models.go @@ -85,6 +85,11 @@ type BasicDependency struct { ResourceName *string `json:"resourceName,omitempty"` } +// CloudError an error response for a resource management request. +type CloudError struct { + Error *ErrorResponse `json:"error,omitempty"` +} + // CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type CreateOrUpdateByIDFuture struct { @@ -664,6 +669,28 @@ type DeploymentValidateResult struct { Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` } +// ErrorAdditionalInfo the resource management error additional info. +type ErrorAdditionalInfo struct { + // Type - READ-ONLY; The additional info type. + Type *string `json:"type,omitempty"` + // Info - READ-ONLY; The additional info. + Info interface{} `json:"info,omitempty"` +} + +// ErrorResponse the resource management error response. +type ErrorResponse struct { + // Code - READ-ONLY; The error code. + Code *string `json:"code,omitempty"` + // Message - READ-ONLY; The error message. + Message *string `json:"message,omitempty"` + // Target - READ-ONLY; The error target. + Target *string `json:"target,omitempty"` + // Details - READ-ONLY; The error details. + Details *[]ErrorResponse `json:"details,omitempty"` + // AdditionalInfo - READ-ONLY; The error additional info. + AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"` +} + // ExportTemplateRequest export resource group template request parameters. type ExportTemplateRequest struct { // ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'. @@ -1667,6 +1694,16 @@ type TargetResource struct { ResourceType *string `json:"resourceType,omitempty"` } +// TemplateHashResult result of the request to calculate template hash. It contains a string of minified +// template and its hash. +type TemplateHashResult struct { + autorest.Response `json:"-"` + // MinifiedTemplate - The minified template string. + MinifiedTemplate *string `json:"minifiedTemplate,omitempty"` + // TemplateHash - The template hash. + TemplateHash *string `json:"templateHash,omitempty"` +} + // TemplateLink entity representing the reference to the template. type TemplateLink struct { // URI - The URI of the template to deploy. 
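The resources models gain CloudError, ErrorResponse, and ErrorAdditionalInfo, which mirror the standard ARM error envelope. A short sketch of decoding such a payload with the new types; the JSON body below is made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
)

func main() {
	// A representative ARM error body of the shape CloudError models.
	body := []byte(`{"error":{"code":"InvalidTemplate","message":"Deployment template validation failed.","target":"template"}}`)

	var ce resources.CloudError
	if err := json.Unmarshal(body, &ce); err != nil {
		log.Fatal(err)
	}
	if ce.Error != nil && ce.Error.Code != nil && ce.Error.Message != nil {
		fmt.Printf("%s: %s\n", *ce.Error.Code, *ce.Error.Message)
	}
}
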
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go index 86bb049aaec..9b5daba19e5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go @@ -164,6 +164,16 @@ func (client AccountsClient) Create(ctx context.Context, resourceGroupName strin {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.NetBiosDomainName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.ForestName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainGUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainSid", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.AzureStorageSid", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, }}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { @@ -518,13 +528,13 @@ func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result // List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use // the ListKeys operation for this. 
-func (client AccountsClient) List(ctx context.Context) (result AccountListResult, err error) { +func (client AccountsClient) List(ctx context.Context) (result AccountListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") defer func() { sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode + if result.alr.Response.Response != nil { + sc = result.alr.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() @@ -535,6 +545,7 @@ func (client AccountsClient) List(ctx context.Context) (result AccountListResult return result, validation.NewError("storage.AccountsClient", "List", err.Error()) } + result.fn = client.listNextResults req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") @@ -543,12 +554,12 @@ func (client AccountsClient) List(ctx context.Context) (result AccountListResult resp, err := client.ListSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} + result.alr.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") return } - result, err = client.ListResponder(resp) + result.alr, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") } @@ -595,6 +606,43 @@ func (client AccountsClient) ListResponder(resp *http.Response) (result AccountL return } +// listNextResults retrieves the next set of results, if any. +func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) { + req, err := lastResults.accountListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client AccountsClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + // ListAccountSAS list SAS credentials of a storage account. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case @@ -779,13 +827,14 @@ func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) ( return } -// ListKeys lists the access keys for the specified storage account. +// ListKeys lists the access keys or Kerberos keys (if active directory enabled) for the specified storage account. 
// Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { +// expand - specifies type of the key to be listed. Possible value is kerb. +func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (result AccountListKeysResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys") defer func() { @@ -809,7 +858,7 @@ func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName str return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error()) } - req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName) + req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, expand) if err != nil { err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") return @@ -831,7 +880,7 @@ func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName str } // ListKeysPreparer prepares the ListKeys request. -func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { +func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -842,6 +891,9 @@ func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroup queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsPost(), @@ -970,13 +1022,13 @@ func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (resul return } -// RegenerateKey regenerates one of the access keys for the specified storage account. +// RegenerateKey regenerates one of the access keys or Kerberos keys for the specified storage account. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// regenerateKey - specifies name of the key which should be regenerated -- key1 or key2. +// regenerateKey - specifies name of the key which should be regenerated -- key1, key2, kerb1, kerb2. 
func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey") diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go index 1ae2999d204..e9eb9cb0d94 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go @@ -135,6 +135,98 @@ func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Respon return } +// List list blob services of storage account. It returns a collection of one object named default. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
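A small, hedged sketch of calling the new BlobServicesClient.List shown just above. It assumes ctx and an authorized storage.BlobServicesClient named blobServicesClient (constructed via the SDK's usual New*Client pattern, which is an assumption here); the Value field comes from the BlobServiceItems model added later in this patch.

	// The returned collection contains a single item named "default".
	items, err := blobServicesClient.List(ctx, "example-rg", "examplestorage")
	if err != nil {
		return err
	}
	if items.Value != nil {
		for _, props := range *items.Value {
			_ = props // BlobServiceProperties for the "default" blob service.
		}
	}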
+func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage // Analytics and CORS (Cross-Origin Resource Sharing) rules. // Parameters: diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go new file mode 100644 index 00000000000..8de550d6af1 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go @@ -0,0 +1,326 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FileServicesClient is the the Azure Storage Management API. 
+type FileServicesClient struct { + BaseClient +} + +// NewFileServicesClient creates an instance of the FileServicesClient client. +func NewFileServicesClient(subscriptionID string) FileServicesClient { + return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client. +func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient { + return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error()) + } + + req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.GetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request") + } + + return +} + +// GetServicePropertiesPreparer prepares the GetServiceProperties request. 
+func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "FileServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the +// http.Response Body if it receives an error. +func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always +// closes the http.Response Body. +func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list all file services in storage accounts +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. +func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error()) + } + + req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.SetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request") + } + + return +} + +// SetServicePropertiesPreparer prepares the SetServiceProperties request. 
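A hedged sketch of driving the new FileServicesClient operations defined above; ctx, an authorized storage.FileServicesClient named fileServicesClient, and the resource names are assumptions, while the method signatures match those in this file.

	// Read the current file service properties, then write them back unchanged
	// (a real caller would adjust e.g. CORS settings in between).
	props, err := fileServicesClient.GetServiceProperties(ctx, "example-rg", "examplestorage")
	if err != nil {
		return err
	}
	if _, err := fileServicesClient.SetServiceProperties(ctx, "example-rg", "examplestorage", props); err != nil {
		return err
	}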
+func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "FileServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the +// http.Response Body if it receives an error. +func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always +// closes the http.Response Body. +func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go new file mode 100644 index 00000000000..560e3a1033a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go @@ -0,0 +1,594 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FileSharesClient is the the Azure Storage Management API. +type FileSharesClient struct { + BaseClient +} + +// NewFileSharesClient creates an instance of the FileSharesClient client. +func NewFileSharesClient(subscriptionID string) FileSharesClient { + return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client. +func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient { + return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a new share under the specified account as described by request body. The share resource includes +// metadata and properties for that share. It does not include a list of the files contained by the share. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// fileShare - properties of the file share to create. +func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: fileShare, + Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(5120), Chain: nil}, + {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}, + }}}}, + {TargetValue: 
client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithJSON(fileShare), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes specified share under its account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. 
Every dash (-) +// character must be immediately preceded and followed by a letter or number. +func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
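To make the new share lifecycle concrete, a hedged sketch that creates and then deletes a share. ctx, an authorized storage.FileSharesClient named fileSharesClient, and the resource names are assumptions; the FileShareProperties/ShareQuota field path is taken from the validation constraints in Create above.

	// Quota in GiB; Create validates the 1-5120 range shown in its constraints.
	quota := int32(100)
	share, err := fileSharesClient.Create(ctx, "example-rg", "examplestorage", "example-share",
		storage.FileShare{FileShareProperties: &storage.FileShareProperties{ShareQuota: &quota}})
	if err != nil {
		return err
	}
	_ = share // FileShare returned on 200/201.

	if _, err := fileSharesClient.Delete(ctx, "example-rg", "examplestorage", "example-share"); err != nil {
		return err
	}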
+func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets properties of a specified share. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all shares. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// skipToken - optional. Continuation token for the list operation. +// maxpagesize - optional. Specified maximum number of shares that can be included in the list. +// filter - optional. When specified, only share names starting with the filter will be listed. 
+func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result FileShareItemsPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") + defer func() { + sc := -1 + if result.fsi.Response.Response != nil { + sc = result.fsi.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.fsi.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request") + return + } + + result.fsi, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(skipToken) > 0 { + queryParameters["$skipToken"] = autorest.Encode("query", skipToken) + } + if len(maxpagesize) > 0 { + queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
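A hedged sketch of walking the paged result returned by FileSharesClient.List above. It assumes ctx, an authorized fileSharesClient, and that FileShareItemsPage exposes the same NotDone/Values/NextWithContext surface as the AccountListResultPage added later in this patch.

	page, err := fileSharesClient.List(ctx, "example-rg", "examplestorage", "", "", "")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, item := range page.Values() {
			_ = item // one FileShareItem per share on the current page.
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}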
+func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) { + req, err := lastResults.fileShareItemsPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result FileShareItemsIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter) + return +} + +// Update updates share properties as specified in request body. Properties not mentioned in the request will not be +// changed. Update fails if the specified share does not already exist. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// fileShare - properties to update for the file share. 
+func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithJSON(fileShare), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go index 83ae6dded4e..b6326d0faba 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go @@ -148,13 +148,15 @@ type DirectoryServiceOptions string const ( // DirectoryServiceOptionsAADDS ... DirectoryServiceOptionsAADDS DirectoryServiceOptions = "AADDS" + // DirectoryServiceOptionsAD ... + DirectoryServiceOptionsAD DirectoryServiceOptions = "AD" // DirectoryServiceOptionsNone ... DirectoryServiceOptionsNone DirectoryServiceOptions = "None" ) // PossibleDirectoryServiceOptionsValues returns an array of possible values for the DirectoryServiceOptions const type. func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions { - return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsNone} + return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsAD, DirectoryServiceOptionsNone} } // GeoReplicationStatus enumerates the values for geo replication status. @@ -272,6 +274,21 @@ func PossibleKindValues() []Kind { return []Kind{BlobStorage, BlockBlobStorage, FileStorage, Storage, StorageV2} } +// LargeFileSharesState enumerates the values for large file shares state. +type LargeFileSharesState string + +const ( + // Disabled ... + Disabled LargeFileSharesState = "Disabled" + // Enabled ... + Enabled LargeFileSharesState = "Enabled" +) + +// PossibleLargeFileSharesStateValues returns an array of possible values for the LargeFileSharesState const type. +func PossibleLargeFileSharesStateValues() []LargeFileSharesState { + return []LargeFileSharesState{Disabled, Enabled} +} + // LeaseDuration enumerates the values for lease duration. type LeaseDuration string @@ -323,6 +340,19 @@ func PossibleLeaseStatusValues() []LeaseStatus { return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked} } +// ListKeyExpand enumerates the values for list key expand. +type ListKeyExpand string + +const ( + // Kerb ... + Kerb ListKeyExpand = "kerb" +) + +// PossibleListKeyExpandValues returns an array of possible values for the ListKeyExpand const type. +func PossibleListKeyExpandValues() []ListKeyExpand { + return []ListKeyExpand{Kerb} +} + // Permissions enumerates the values for permissions. type Permissions string @@ -832,6 +862,145 @@ type AccountListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; Gets the list of storage accounts and their properties. 
Value *[]Account `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of storage accounts. Returned when total number of requested storage accounts exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// AccountListResultIterator provides access to a complete listing of Account values. +type AccountListResultIterator struct { + i int + page AccountListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AccountListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AccountListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AccountListResultIterator) Response() AccountListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AccountListResultIterator) Value() Account { + if !iter.page.NotDone() { + return Account{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AccountListResultIterator type. +func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator { + return AccountListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (alr AccountListResult) IsEmpty() bool { + return alr.Value == nil || len(*alr.Value) == 0 +} + +// accountListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) { + if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(alr.NextLink))) +} + +// AccountListResultPage contains a page of Account values. +type AccountListResultPage struct { + fn func(context.Context, AccountListResult) (AccountListResult, error) + alr AccountListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
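Since AccountsClient.List now returns a page rather than a flat result, here is a hedged sketch of enumerating every account through the iterator defined here; ctx and an authorized accountsClient are assumed, as is the Account model's Name field.

	iter, err := accountsClient.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		acct := iter.Value()
		if acct.Name != nil {
			fmt.Println(*acct.Name) // print each account name across all pages.
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}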
+func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.alr) + if err != nil { + return err + } + page.alr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AccountListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AccountListResultPage) NotDone() bool { + return !page.alr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AccountListResultPage) Response() AccountListResult { + return page.alr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AccountListResultPage) Values() []Account { + if page.alr.IsEmpty() { + return nil + } + return *page.alr.Value +} + +// Creates a new instance of the AccountListResultPage type. +func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage { + return AccountListResultPage{fn: getNextPage} } // AccountProperties properties of the storage account. @@ -872,13 +1041,15 @@ type AccountProperties struct { GeoReplicationStats *GeoReplicationStats `json:"geoReplicationStats,omitempty"` // FailoverInProgress - READ-ONLY; If the failover is in progress, the value will be true, otherwise, it will be null. FailoverInProgress *bool `json:"failoverInProgress,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'Disabled', 'Enabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` } // AccountPropertiesCreateParameters the parameters used to create the storage account. type AccountPropertiesCreateParameters struct { // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // Encryption - Provides the encryption settings on the account. If left unspecified the account encryption settings will remain the same. The default setting is unencrypted. + // Encryption - Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled. Encryption *Encryption `json:"encryption,omitempty"` // NetworkRuleSet - Network rule set NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` @@ -890,6 +1061,8 @@ type AccountPropertiesCreateParameters struct { EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true. IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. 
Possible values include: 'Disabled', 'Enabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` } // AccountPropertiesUpdateParameters the parameters used when updating a storage account. @@ -906,11 +1079,13 @@ type AccountPropertiesUpdateParameters struct { EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` // NetworkRuleSet - Network rule set NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'Disabled', 'Enabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` } // AccountRegenerateKeyParameters the parameters used to regenerate the storage account key. type AccountRegenerateKeyParameters struct { - // KeyName - The name of storage keys that want to be regenerated, possible values are key1, key2. + // KeyName - The name of storage keys that want to be regenerated, possible values are key1, key2, kerb1, kerb2. KeyName *string `json:"keyName,omitempty"` } @@ -1082,6 +1257,22 @@ func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error { return nil } +// ActiveDirectoryProperties settings properties for Active Directory (AD). +type ActiveDirectoryProperties struct { + // DomainName - Specifies the primary domain that the AD DNS server is authoritative for. + DomainName *string `json:"domainName,omitempty"` + // NetBiosDomainName - Specifies the NetBIOS domain name. + NetBiosDomainName *string `json:"netBiosDomainName,omitempty"` + // ForestName - Specifies the Active Directory forest to get. + ForestName *string `json:"forestName,omitempty"` + // DomainGUID - Specifies the domain GUID. + DomainGUID *string `json:"domainGuid,omitempty"` + // DomainSid - Specifies the security identifier (SID). + DomainSid *string `json:"domainSid,omitempty"` + // AzureStorageSid - Specifies the security identifier (SID) for Azure Storage. + AzureStorageSid *string `json:"azureStorageSid,omitempty"` +} + // AzureEntityResource the resource model definition for a Azure Resource Manager resource with an etag. type AzureEntityResource struct { // Etag - READ-ONLY; Resource Etag. @@ -1096,8 +1287,10 @@ type AzureEntityResource struct { // AzureFilesIdentityBasedAuthentication settings for Azure Files identity based authentication. type AzureFilesIdentityBasedAuthentication struct { - // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS' + // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS', 'DirectoryServiceOptionsAD' DirectoryServiceOptions DirectoryServiceOptions `json:"directoryServiceOptions,omitempty"` + // ActiveDirectoryProperties - Required if choose AD. + ActiveDirectoryProperties *ActiveDirectoryProperties `json:"activeDirectoryProperties,omitempty"` } // BlobContainer properties of the blob container, including Id, resource name, resource type, Etag. @@ -1184,6 +1377,13 @@ func (bc *BlobContainer) UnmarshalJSON(body []byte) error { return nil } +// BlobServiceItems ... +type BlobServiceItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of blob services returned. + Value *[]BlobServiceProperties `json:"value,omitempty"` +} + // BlobServiceProperties the properties of a storage account’s Blob service. 
type BlobServiceProperties struct { autorest.Response `json:"-"` @@ -1267,6 +1467,14 @@ type BlobServicePropertiesProperties struct { DeleteRetentionPolicy *DeleteRetentionPolicy `json:"deleteRetentionPolicy,omitempty"` // AutomaticSnapshotPolicyEnabled - Automatic Snapshot is enabled if set to true. AutomaticSnapshotPolicyEnabled *bool `json:"automaticSnapshotPolicyEnabled,omitempty"` + // ChangeFeed - The blob service properties for change feed events. + ChangeFeed *ChangeFeed `json:"changeFeed,omitempty"` +} + +// ChangeFeed the blob service properties for change feed events. +type ChangeFeed struct { + // Enabled - Indicates whether change feed event logging is enabled for the Blob service. + Enabled *bool `json:"enabled,omitempty"` } // CheckNameAvailabilityResult the CheckNameAvailability operation response. @@ -1280,6 +1488,23 @@ type CheckNameAvailabilityResult struct { Message *string `json:"message,omitempty"` } +// CloudError an error response from the Storage service. +type CloudError struct { + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody an error response from the Storage service. +type CloudErrorBody struct { + // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + Code *string `json:"code,omitempty"` + // Message - A message describing the error, intended to be suitable for display in a user interface. + Message *string `json:"message,omitempty"` + // Target - The target of the particular error. For example, the name of the property in error. + Target *string `json:"target,omitempty"` + // Details - A list of additional details about the error. + Details *[]CloudErrorBody `json:"details,omitempty"` +} + // ContainerProperties the properties of a container. type ContainerProperties struct { // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' @@ -1419,6 +1644,428 @@ type Endpoints struct { Dfs *string `json:"dfs,omitempty"` } +// FileServiceItems ... +type FileServiceItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of file services returned. + Value *[]FileServiceProperties `json:"value,omitempty"` +} + +// FileServiceProperties the properties of File services in storage account. +type FileServiceProperties struct { + autorest.Response `json:"-"` + // FileServicePropertiesProperties - The properties of File services in storage account. + *FileServicePropertiesProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileServiceProperties. +func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsp.FileServicePropertiesProperties != nil { + objectMap["properties"] = fsp.FileServicePropertiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct. 
+func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileServiceProperties FileServicePropertiesProperties + err = json.Unmarshal(*v, &fileServiceProperties) + if err != nil { + return err + } + fsp.FileServicePropertiesProperties = &fileServiceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fsp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fsp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fsp.Type = &typeVar + } + } + } + + return nil +} + +// FileServicePropertiesProperties the properties of File services in storage account. +type FileServicePropertiesProperties struct { + // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service. + Cors *CorsRules `json:"cors,omitempty"` +} + +// FileShare properties of the file share, including Id, resource name, resource type, Etag. +type FileShare struct { + autorest.Response `json:"-"` + // FileShareProperties - Properties of the file share. + *FileShareProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShare. +func (fs FileShare) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fs.FileShareProperties != nil { + objectMap["properties"] = fs.FileShareProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileShare struct. 
+func (fs *FileShare) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileShareProperties FileShareProperties + err = json.Unmarshal(*v, &fileShareProperties) + if err != nil { + return err + } + fs.FileShareProperties = &fileShareProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fs.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fs.Type = &typeVar + } + } + } + + return nil +} + +// FileShareItem the file share properties be listed out. +type FileShareItem struct { + // FileShareProperties - The file share properties be listed out. + *FileShareProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShareItem. +func (fsi FileShareItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsi.FileShareProperties != nil { + objectMap["properties"] = fsi.FileShareProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileShareItem struct. +func (fsi *FileShareItem) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileShareProperties FileShareProperties + err = json.Unmarshal(*v, &fileShareProperties) + if err != nil { + return err + } + fsi.FileShareProperties = &fileShareProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fsi.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fsi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fsi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fsi.Type = &typeVar + } + } + } + + return nil +} + +// FileShareItems response schema. Contains list of shares returned, and if paging is requested or +// required, a URL to next page of shares. +type FileShareItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of file shares returned. 
+ Value *[]FileShareItem `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of shares. Returned when total number of requested shares exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// FileShareItemsIterator provides access to a complete listing of FileShareItem values. +type FileShareItemsIterator struct { + i int + page FileShareItemsPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *FileShareItemsIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter FileShareItemsIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter FileShareItemsIterator) Response() FileShareItems { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter FileShareItemsIterator) Value() FileShareItem { + if !iter.page.NotDone() { + return FileShareItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the FileShareItemsIterator type. +func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator { + return FileShareItemsIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (fsi FileShareItems) IsEmpty() bool { + return fsi.Value == nil || len(*fsi.Value) == 0 +} + +// fileShareItemsPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) { + if fsi.NextLink == nil || len(to.String(fsi.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(fsi.NextLink))) +} + +// FileShareItemsPage contains a page of FileShareItem values. +type FileShareItemsPage struct { + fn func(context.Context, FileShareItems) (FileShareItems, error) + fsi FileShareItems +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.fsi) + if err != nil { + return err + } + page.fsi = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *FileShareItemsPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page FileShareItemsPage) NotDone() bool { + return !page.fsi.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page FileShareItemsPage) Response() FileShareItems { + return page.fsi +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page FileShareItemsPage) Values() []FileShareItem { + if page.fsi.IsEmpty() { + return nil + } + return *page.fsi.Value +} + +// Creates a new instance of the FileShareItemsPage type. +func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage { + return FileShareItemsPage{fn: getNextPage} +} + +// FileShareProperties the properties of the file share. +type FileShareProperties struct { + // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Metadata - A name-value pair to associate with the share as metadata. + Metadata map[string]*string `json:"metadata"` + // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). + ShareQuota *int32 `json:"shareQuota,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShareProperties. +func (fsp FileShareProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsp.Metadata != nil { + objectMap["metadata"] = fsp.Metadata + } + if fsp.ShareQuota != nil { + objectMap["shareQuota"] = fsp.ShareQuota + } + return json.Marshal(objectMap) +} + // GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File // services. It is only available when geo-redundant replication is enabled for the storage account. 
type GeoReplicationStats struct { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index c9c62d799ac..bd19eccc41e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -197,6 +197,47 @@ func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, o return b.respondCreation(resp, BlobTypeBlock) } +// PutBlockFromURLOptions includes the options for a put block from URL operation +type PutBlockFromURLOptions struct { + PutBlockOptions + + SourceContentMD5 string `header:"x-ms-source-content-md5"` + SourceContentCRC64 string `header:"x-ms-source-content-crc64"` +} + +// PutBlockFromURL copy data of exactly specified size from specified URL to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes from a remote URL and the offset and length is known in advance. +// +// The API rejects requests with size > 100 MiB (but this limit is not +// checked by the SDK). +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url +func (b *Blob) PutBlockFromURL(blockID string, blobURL string, offset int64, size uint64, options *PutBlockFromURLOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + // The value of this header must be set to zero. + // When the length is not zero, the operation will fail with the status code 400 (Bad Request). + headers["Content-Length"] = "0" + headers["x-ms-copy-source"] = blobURL + headers["x-ms-source-range"] = fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + // PutBlockListOptions includes the options for a put block list operation type PutBlockListOptions struct { Timeout uint diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 16c2988c02b..38c89d8f8d5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. -const Number = "v32.5.0" +const Number = "v35.0.0" diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/Protobuild.toml new file mode 100644 index 00000000000..39e5593275c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -0,0 +1,41 @@ +version = "unstable" +generator = "gogoctrd" +plugins = ["grpc", "fieldpath"] + +# Control protoc include paths. Below are usually some good defaults, but feel +# free to try it without them if it works for your project. 
+[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["./protobuf"] + + # Paths that should be treated as include roots in relation to the vendor + # directory. These will be calculated with the vendor directory nearest the + # target package. + packages = ["github.com/gogo/protobuf"] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" + +# Lock down runhcs config + +[[descriptors]] +prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" +target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt" +ignore_files = [ + "google/protobuf/descriptor.proto", + "gogoproto/gogo.proto" +] diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/appveyor.yml index a8ec5a59390..1949d1d41df 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/appveyor.yml +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/appveyor.yml @@ -8,22 +8,34 @@ environment: GOPATH: c:\gopath PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH% -stack: go 1.11 +stack: go 1.12.2 build_script: - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL - gometalinter.exe --config .gometalinter.json ./... - - go build ./cmd/wclayer + - go build ./cmd/containerd-shim-runhcs-v1 - go build ./cmd/runhcs - go build ./cmd/tar2ext4 + - go build ./cmd/wclayer + - go build ./internal/tools/grantvmgroupaccess + - go build ./internal/tools/uvmboot + - go build ./internal/tools/zapdir - go test -v ./... 
-tags admin + - go test -c ./test/containerd-shim-runhcs-v1/ -tags functional + - go test -c ./test/cri-containerd/ -tags functional - go test -c ./test/functional/ -tags functional - - go test -c ./test/runhcs/ -tags integration + - go test -c ./test/runhcs/ -tags functional artifacts: - - path: 'wclayer.exe' + - path: 'containerd-shim-runhcs-v1.exe' - path: 'runhcs.exe' - path: 'tar2ext4.exe' + - path: 'wclayer.exe' + - path: 'grantvmgroupaccess.exe' + - path: 'uvmboot.exe' + - path: 'zapdir.exe' + - path: 'containerd-shim-runhcs-v1.test.exe' + - path: 'cri-containerd.test.exe' - path: 'functional.test.exe' - path: 'runhcs.test.exe' \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go index d58335e5c67..f13d8bfab5a 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go @@ -2,7 +2,7 @@ package hcn import ( "encoding/json" - + "errors" "github.com/Microsoft/hcsshim/internal/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" @@ -299,6 +299,10 @@ func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) + if endpoint.HostComputeNamespace != "" { + return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") + } + jsonString, err := json.Marshal(endpoint) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go index 29d13deac9e..5ac0ed5659d 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go @@ -24,7 +24,7 @@ var ( // HNSVersion1803 added ACL functionality. HNSVersion1803 = Version{Major: 7, Minor: 2} // V2ApiSupport allows the use of V2 Api calls and V2 Schema. - V2ApiSupport = Version{Major: 9, Minor: 1} + V2ApiSupport = Version{Major: 9, Minor: 2} // Remote Subnet allows for Remote Subnet policies on Overlay networks RemoteSubnetVersion = Version{Major: 9, Minor: 2} // A Host Route policy allows for local container to local host communication Overlay networks diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go index b5f1db8b22e..ae58d57cff7 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -2,7 +2,7 @@ package hcn import ( "encoding/json" - + "errors" "github.com/Microsoft/hcsshim/internal/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" @@ -320,6 +320,24 @@ func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { // Create Network. 
func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) + for _, ipam := range network.Ipams { + for _, subnet := range ipam.Subnets { + if subnet.IpAddressPrefix != "" { + hasDefault := false + for _, route := range subnet.Routes { + if route.NextHop == "" { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { + hasDefault = true + } + } + if !hasDefault { + return nil, errors.New("network create error, no default gateway") + } + } + } + } jsonString, err := json.Marshal(network) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go deleted file mode 100644 index 5d3d0dfef13..00000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go +++ /dev/null @@ -1,100 +0,0 @@ -package guestrequest - -import ( - "github.com/Microsoft/hcsshim/internal/schema2" -) - -// Arguably, many of these (at least CombinedLayers) should have been generated -// by swagger. -// -// This will also change package name due to an inbound breaking change. - -// This class is used by a modify request to add or remove a combined layers -// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath -// using the specified layers as the parent content. Ignores property ScratchPath -// since the container path is already the scratch path. For linux, the GCS unions -// the specified layers and ScratchPath together, placing the resulting union -// filesystem at ContainerRootPath. -type CombinedLayers struct { - ContainerRootPath string `json:"ContainerRootPath,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` - ScratchPath string `json:"ScratchPath,omitempty"` -} - -// Defines the schema for hosted settings passed to GCS and/or OpenGCS - -// SCSI. 
Scratch space for remote file-system commands, or R/W layer for containers -type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` -} - -type WCOWMappedVirtualDisk struct { - ContainerPath string `json:"ContainerPath,omitempty"` - Lun int32 `json:"Lun,omitempty"` -} - -type LCOWMappedDirectory struct { - MountPath string `json:"MountPath,omitempty"` - Port int32 `json:"Port,omitempty"` - ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported) - ReadOnly bool `json:"ReadOnly,omitempty"` -} - -// Read-only layers over VPMem -type LCOWMappedVPMemDevice struct { - DeviceNumber uint32 `json:"DeviceNumber,omitempty"` - MountPath string `json:"MountPath,omitempty"` // /tmp/pN -} - -type LCOWNetworkAdapter struct { - NamespaceID string `json:",omitempty"` - ID string `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` -} - -type ResourceType string - -const ( - // These are constants for v2 schema modify guest requests. - ResourceTypeMappedDirectory ResourceType = "MappedDirectory" - ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk" - ResourceTypeNetwork ResourceType = "Network" - ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace" - ResourceTypeCombinedLayers ResourceType = "CombinedLayers" - ResourceTypeVPMemDevice ResourceType = "VPMemDevice" -) - -// GuestRequest is for modify commands passed to the guest. -type GuestRequest struct { - RequestType string `json:"RequestType,omitempty"` - ResourceType ResourceType `json:"ResourceType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type NetworkModifyRequest struct { - AdapterId string `json:"AdapterId,omitempty"` - RequestType string `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type RS4NetworkModifyRequest struct { - AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` - RequestType string `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -// SignalProcessOptions is the options passed to either WCOW or LCOW -// to signal a given process. 
-type SignalProcessOptions struct { - Signal int `json:,omitempty` -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go index f9a922a4bb9..6d909873c2f 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -1,10 +1,12 @@ package hcs import ( + "fmt" "sync" "syscall" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/sirupsen/logrus" ) @@ -40,16 +42,61 @@ var ( ) type hcsNotification uint32 + +func (hn hcsNotification) String() string { + switch hn { + case hcsNotificationSystemExited: + return "SystemExited" + case hcsNotificationSystemCreateCompleted: + return "SystemCreateCompleted" + case hcsNotificationSystemStartCompleted: + return "SystemStartCompleted" + case hcsNotificationSystemPauseCompleted: + return "SystemPauseCompleted" + case hcsNotificationSystemResumeCompleted: + return "SystemResumeCompleted" + case hcsNotificationSystemCrashReport: + return "SystemCrashReport" + case hcsNotificationSystemSiloJobCreated: + return "SystemSiloJobCreated" + case hcsNotificationSystemSaveCompleted: + return "SystemSaveCompleted" + case hcsNotificationSystemRdpEnhancedModeStateChanged: + return "SystemRdpEnhancedModeStateChanged" + case hcsNotificationSystemShutdownFailed: + return "SystemShutdownFailed" + case hcsNotificationSystemGetPropertiesCompleted: + return "SystemGetPropertiesCompleted" + case hcsNotificationSystemModifyCompleted: + return "SystemModifyCompleted" + case hcsNotificationSystemCrashInitiated: + return "SystemCrashInitiated" + case hcsNotificationSystemGuestConnectionClosed: + return "SystemGuestConnectionClosed" + case hcsNotificationProcessExited: + return "ProcessExited" + case hcsNotificationInvalid: + return "Invalid" + case hcsNotificationServiceDisconnect: + return "ServiceDisconnect" + default: + return fmt.Sprintf("Unknown: %d", hn) + } +} + type notificationChannel chan error type notifcationWatcherContext struct { channels notificationChannels handle hcsCallback + + systemID string + processID int } type notificationChannels map[hcsNotification]notificationChannel -func newChannels() notificationChannels { +func newSystemChannels() notificationChannels { channels := make(notificationChannels) channels[hcsNotificationSystemExited] = make(notificationChannel, 1) @@ -57,17 +104,14 @@ func newChannels() notificationChannels { channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1) channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1) channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) + + return channels +} + +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + channels[hcsNotificationProcessExited] = make(notificationChannel, 1) - channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) - channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1) - channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1) - channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1) - channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1) - 
channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1) - channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1) return channels } @@ -92,12 +136,28 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt return 0 } + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("") + + // The HCS notification system can grow overtime. We explicitly opt-in to + // the notifications we would like to handle, all others we simply return. + // This means that as it grows we don't have issues associated with new + // notification types the code didn't know about. + switch notificationType { + case hcsNotificationSystemExited, hcsNotificationSystemCreateCompleted, hcsNotificationSystemStartCompleted, hcsNotificationSystemPauseCompleted, hcsNotificationSystemResumeCompleted: + case hcsNotificationProcessExited: + default: + return 0 + } + if channel, ok := context.channels[notificationType]; ok { channel <- result - } else { - logrus.WithFields(logrus.Fields{ - "notification-type": notificationType, - }).Warn("Received a callback of an unsupported type") } return 0 diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 079b565353f..316853a9e31 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -272,6 +272,13 @@ func IsNotSupported(err error) bool { err == ErrVmcomputeUnknownMessage } +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationInvalidState +} + func getInnerError(err error) error { switch pe := err.(type) { case nil: diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go index b0d49cbcf15..5bbb31d152e 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go @@ -27,7 +27,7 @@ import ( //sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? //sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? //sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? //sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? //sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? 
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 41e20bbf992..997a37a2871 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -7,7 +7,6 @@ import ( "syscall" "time" - "github.com/Microsoft/hcsshim/internal/guestrequest" "github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/sirupsen/logrus" @@ -23,6 +22,10 @@ type Process struct { callbackNumber uintptr logctx logrus.Fields + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error } func newProcess(process hcsProcess, processID int, computeSystem *System) *Process { @@ -34,6 +37,7 @@ func newProcess(process hcsProcess, processID int, computeSystem *System) *Proce logfields.ContainerID: computeSystem.ID(), logfields.ProcessID: processID, }, + waitBlock: make(chan struct{}), } } @@ -107,7 +111,11 @@ func (process *Process) logOperationEnd(operation string, err error) { } // Signal signals the process with `options`. -func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) { +// +// For LCOW `guestrequest.SignalProcessOptionsLCOW`. +// +// For WCOW `guestrequest.SignalProcessOptionsWCOW`. +func (process *Process) Signal(options interface{}) (err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() @@ -163,33 +171,49 @@ func (process *Process) Kill() (err error) { return nil } -// Wait waits for the process to exit. +// waitBackground waits for the process exit notification. Once received sets +// `process.waitError` (if any) and unblocks all `Wait` and `WaitTimeout` calls. +// +// This MUST be called exactly once per `process.handle` but `Wait` and +// `WaitTimeout` are safe to call multiple times. +func (process *Process) waitBackground() { + process.waitError = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) + process.closedWaitOnce.Do(func() { + close(process.waitBlock) + }) +} + +// Wait waits for the process to exit. If the process has already exited returns +// the previous error (if any). func (process *Process) Wait() (err error) { operation := "hcsshim::Process::Wait" process.logOperationBegin(operation) defer func() { process.logOperationEnd(operation, err) }() - err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) - if err != nil { - return makeProcessError(process, operation, err, nil) + <-process.waitBlock + if process.waitError != nil { + return makeProcessError(process, operation, process.waitError, nil) } - return nil } -// WaitTimeout waits for the process to exit or the duration to elapse. It returns -// false if timeout occurs. +// WaitTimeout waits for the process to exit or the duration to elapse. If the +// process has already exited returns the previous error (if any). If a timeout +// occurs returns `ErrTimeout`.
func (process *Process) WaitTimeout(timeout time.Duration) (err error) { operation := "hcssshim::Process::WaitTimeout" process.logOperationBegin(operation) defer func() { process.logOperationEnd(operation, err) }() - err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) - if err != nil { - return makeProcessError(process, operation, err, nil) + select { + case <-process.waitBlock: + if process.waitError != nil { + return makeProcessError(process, operation, process.waitError, nil) + } + return nil + case <-time.After(timeout): + return makeProcessError(process, operation, ErrTimeout, nil) } - - return nil } // ResizeConsole resizes the console of the process. @@ -276,15 +300,20 @@ func (process *Process) ExitCode() (_ int, err error) { properties, err := process.Properties() if err != nil { - return 0, makeProcessError(process, operation, err, nil) + return -1, makeProcessError(process, operation, err, nil) } if properties.Exited == false { - return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil) + return -1, makeProcessError(process, operation, ErrInvalidProcessState, nil) } if properties.LastWaitResult != 0 { - return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil) + logrus.WithFields(logrus.Fields{ + logfields.ContainerID: process.SystemID(), + logfields.ProcessID: process.processID, + "wait-result": properties.LastWaitResult, + }).Warn("hcsshim::Process::ExitCode - Non-zero last wait result") + return -1, nil } return int(properties.ExitCode), nil @@ -397,13 +426,18 @@ func (process *Process) Close() (err error) { } process.handle = 0 + process.closedWaitOnce.Do(func() { + close(process.waitBlock) + }) return nil } func (process *Process) registerCallback() error { context := ¬ifcationWatcherContext{ - channels: newChannels(), + channels: newProcessChannels(), + systemID: process.SystemID(), + processID: process.processID, } callbackMapLock.Lock() @@ -450,7 +484,7 @@ func (process *Process) unregisterCallback() error { closeChannels(context.channels) callbackMapLock.Lock() - callbackMap[callbackNumber] = nil + delete(callbackMap, callbackNumber) callbackMapLock.Unlock() handle = 0 diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 20b242524d0..29a734c1b30 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -43,6 +43,10 @@ type System struct { callbackNumber uintptr logctx logrus.Fields + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error } func newSystem(id string) *System { @@ -51,6 +55,7 @@ func newSystem(id string) *System { logctx: logrus.Fields{ logfields.ContainerID: id, }, + waitBlock: make(chan struct{}), } } @@ -121,6 +126,8 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events) } + go computeSystem.waitBackground() + return computeSystem, nil } @@ -153,6 +160,7 @@ func OpenComputeSystem(id string) (_ *System, err error) { if err = computeSystem.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, operation, "", err, nil) } + go computeSystem.waitBackground() return computeSystem, nil } @@ -280,7 +288,7 @@ func (computeSystem *System) Shutdown() (err error) { operation := 
"hcsshim::ComputeSystem::Shutdown" computeSystem.logOperationBegin(operation) defer func() { - if IsAlreadyStopped(err) { + if IsAlreadyClosed(err) || IsAlreadyStopped(err) || IsPending(err) { computeSystem.logOperationEnd(operation, nil) } else { computeSystem.logOperationEnd(operation, err) @@ -312,7 +320,7 @@ func (computeSystem *System) Terminate() (err error) { operation := "hcsshim::ComputeSystem::Terminate" computeSystem.logOperationBegin(operation) defer func() { - if IsPending(err) { + if IsAlreadyClosed(err) || IsAlreadyStopped(err) || IsPending(err) { computeSystem.logOperationEnd(operation, nil) } else { computeSystem.logOperationEnd(operation, err) @@ -335,48 +343,67 @@ func (computeSystem *System) Terminate() (err error) { return nil } -// Wait synchronously waits for the compute system to shutdown or terminate. +// waitBackground waits for the compute system exit notification. Once received +// sets `computeSystem.waitError` (if any) and unblocks all `Wait`, +// `WaitExpectedError`, and `WaitTimeout` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait`, +// `WaitExpectedError`, and `WaitTimeout` are safe to call multiple times. +func (computeSystem *System) waitBackground() { + computeSystem.waitError = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + computeSystem.closedWaitOnce.Do(func() { + close(computeSystem.waitBlock) + }) +} + +// Wait synchronously waits for the compute system to shutdown or terminate. If +// the compute system has already exited returns the previous error (if any). func (computeSystem *System) Wait() (err error) { operation := "hcsshim::ComputeSystem::Wait" computeSystem.logOperationBegin(operation) defer func() { computeSystem.logOperationEnd(operation, err) }() - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - if err != nil { - return makeSystemError(computeSystem, "Wait", "", err, nil) + <-computeSystem.waitBlock + if computeSystem.waitError != nil { + return makeSystemError(computeSystem, "Wait", "", computeSystem.waitError, nil) } return nil } // WaitExpectedError synchronously waits for the compute system to shutdown or -// terminate, and ignores the passed error if it occurs. +// terminate and returns the error (if any) as long as it does not match +// `expected`. If the compute system has already exited returns the previous +// error (if any) as long as it does not match `expected`. func (computeSystem *System) WaitExpectedError(expected error) (err error) { operation := "hcsshim::ComputeSystem::WaitExpectedError" computeSystem.logOperationBegin(operation) defer func() { computeSystem.logOperationEnd(operation, err) }() - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - if err != nil && getInnerError(err) != expected { - return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil) + <-computeSystem.waitBlock + if computeSystem.waitError != nil && getInnerError(computeSystem.waitError) != expected { + return makeSystemError(computeSystem, "WaitExpectedError", "", computeSystem.waitError, nil) } - return nil } -// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse. -// If the timeout expires, IsTimeout(err) == true +// WaitTimeout synchronously waits for the compute system to terminate or the +// duration to elapse. If the timeout expires, `IsTimeout(err) == true`. 
If +// the compute system has already exited returns the previous error (if any). func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) { operation := "hcsshim::ComputeSystem::WaitTimeout" computeSystem.logOperationBegin(operation) defer func() { computeSystem.logOperationEnd(operation, err) }() - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout) - if err != nil { - return makeSystemError(computeSystem, "WaitTimeout", "", err, nil) + select { + case <-computeSystem.waitBlock: + if computeSystem.waitError != nil { + return makeSystemError(computeSystem, "WaitTimeout", "", computeSystem.waitError, nil) + } + return nil + case <-time.After(timeout): + return makeSystemError(computeSystem, "WaitTimeout", "", ErrTimeout, nil) } - - return nil } func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) { @@ -387,18 +414,19 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem computeSystem.logOperationBegin(operation) defer func() { computeSystem.logOperationEnd(operation, err) }() - queryj, err := json.Marshal(schema1.PropertyQuery{types}) + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) if err != nil { return nil, makeSystemError(computeSystem, "Properties", "", err, nil) } + queryString := string(queryBytes) logrus.WithFields(computeSystem.logctx). - WithField(logfields.JSON, queryj). + WithField(logfields.JSON, queryString). Debug("HCS ComputeSystem Properties Query") var resultp, propertiesp *uint16 syscallWatcher(computeSystem.logctx, func() { - err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp) + err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryString), &propertiesp, &resultp) }) events := processHcsResult(resultp) if err != nil { @@ -519,6 +547,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error if err = process.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) } + go process.waitBackground() return process, nil } @@ -557,6 +586,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) { if err = process.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil) } + go process.waitBackground() return process, nil } @@ -587,13 +617,17 @@ func (computeSystem *System) Close() (err error) { } computeSystem.handle = 0 + computeSystem.closedWaitOnce.Do(func() { + close(computeSystem.waitBlock) + }) return nil } func (computeSystem *System) registerCallback() error { context := ¬ifcationWatcherContext{ - channels: newChannels(), + channels: newSystemChannels(), + systemID: computeSystem.id, } callbackMapLock.Lock() @@ -640,7 +674,7 @@ func (computeSystem *System) unregisterCallback() error { closeChannels(context.channels) callbackMapLock.Lock() - callbackMap[callbackNumber] = nil + delete(callbackMap, callbackNumber) callbackMapLock.Unlock() handle = 0 diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go index 91e212c5748..c7d660cc74a 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -17,6 +17,11 @@ func processAsyncHcsResult(err 
error, resultp *uint16, callbackNumber uintptr, e func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + logrus.Errorf("failed to waitForNotification: callbackNumber %d does not exist in callbackMap", callbackNumber) + return ErrHandleClose + } channels := callbackMap[callbackNumber].channels callbackMapLock.RUnlock() diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go index fcd5cdc87f6..20bfad252f7 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go @@ -56,13 +56,13 @@ var ( procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") ) func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { @@ -417,10 +417,10 @@ func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr e } func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { + if hr = procHcsSignalProcess.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go index 7e859de9124..b7ae96fdddc 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -2,9 +2,9 @@ package hns import ( "encoding/json" - "net" - + "errors" "github.com/sirupsen/logrus" + "net" ) // Subnet is assoicated with a network and represents a list @@ -98,6 +98,12 @@ func (network *HNSNetwork) Create() (*HNSNetwork, error) { title 
:= "hcsshim::HNSNetwork::" + operation logrus.Debugf(title+" id=%s", network.Id) + for _, subnet := range network.Subnets { + if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + } + jsonString, err := json.Marshal(network) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go index eb171817a69..41f8fdea029 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go @@ -10,7 +10,6 @@ package hcsschema type Plan9Share struct { - Name string `json:"Name,omitempty"` // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. @@ -30,4 +29,6 @@ type Plan9Share struct { ReadOnly bool `json:"ReadOnly,omitempty"` UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/vendor.conf b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/vendor.conf index 6e0ed156621..888336ed5ea 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/vendor.conf +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/vendor.conf @@ -1,13 +1,20 @@ github.com/blang/semver v3.1.0 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 +github.com/containerd/containerd faec567304bbdf6864b1663d4f813641b5880a4a github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a +github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 +github.com/gogo/protobuf v1.0.0 +github.com/golang/protobuf v1.1.0 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c -github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2 +github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac github.com/Microsoft/opengcs v0.3.9 -github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 +github.com/opencontainers/runc 12f6a991201fdb8f82579582d5e00e28fba06d0a +github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88 github.com/pkg/errors v0.8.1 github.com/sirupsen/logrus v1.3.0 @@ -17,5 +24,10 @@ github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874 golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 +golang.org/x/net ed066c81e75eba56dd9bd2139ade88125b855585 golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f -golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb \ No newline at end of file +golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb +golang.org/x/text 
d14c52b222ee852cdba8b07206ca0c614b389876 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 +google.golang.org/grpc v1.12.0 +k8s.io/kubernetes v1.13.0 diff --git a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go index d84db2fca17..e2e02d808ef 100644 --- a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go +++ b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -3,16 +3,17 @@ package csi -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" -import wrappers "github.com/golang/protobuf/ptypes/wrappers" - import ( - context "golang.org/x/net/context" + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -24,7 +25,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type PluginCapability_Service_Type int32 @@ -53,6 +54,7 @@ var PluginCapability_Service_Type_name = map[int32]string{ 1: "CONTROLLER_SERVICE", 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", } + var PluginCapability_Service_Type_value = map[string]int32{ "UNKNOWN": 0, "CONTROLLER_SERVICE": 1, @@ -62,8 +64,9 @@ var PluginCapability_Service_Type_value = map[string]int32{ func (x PluginCapability_Service_Type) String() string { return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) } + func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{4, 0, 0} + return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0} } type PluginCapability_VolumeExpansion_Type int32 @@ -114,6 +117,7 @@ var PluginCapability_VolumeExpansion_Type_name = map[int32]string{ 1: "ONLINE", 2: "OFFLINE", } + var PluginCapability_VolumeExpansion_Type_value = map[string]int32{ "UNKNOWN": 0, "ONLINE": 1, @@ -123,8 +127,9 @@ var PluginCapability_VolumeExpansion_Type_value = map[string]int32{ func (x PluginCapability_VolumeExpansion_Type) String() string { return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x)) } + func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{4, 1, 0} + return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0} } type VolumeCapability_AccessMode_Mode int32 @@ -155,6 +160,7 @@ var VolumeCapability_AccessMode_Mode_name = map[int32]string{ 4: "MULTI_NODE_SINGLE_WRITER", 5: "MULTI_NODE_MULTI_WRITER", } + var VolumeCapability_AccessMode_Mode_value = map[string]int32{ "UNKNOWN": 0, "SINGLE_NODE_WRITER": 1, @@ -167,8 +173,9 @@ var VolumeCapability_AccessMode_Mode_value = map[string]int32{ func (x 
VolumeCapability_AccessMode_Mode) String() string { return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) } + func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{10, 2, 0} + return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0} } type ControllerServiceCapability_RPC_Type int32 @@ -195,38 +202,45 @@ const ( ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8 // See VolumeExpansion for details. ControllerServiceCapability_RPC_EXPAND_VOLUME ControllerServiceCapability_RPC_Type = 9 + // Indicates the SP supports the + // ListVolumesResponse.entry.published_nodes field + ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES ControllerServiceCapability_RPC_Type = 10 ) var ControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_VOLUME", - 2: "PUBLISH_UNPUBLISH_VOLUME", - 3: "LIST_VOLUMES", - 4: "GET_CAPACITY", - 5: "CREATE_DELETE_SNAPSHOT", - 6: "LIST_SNAPSHOTS", - 7: "CLONE_VOLUME", - 8: "PUBLISH_READONLY", - 9: "EXPAND_VOLUME", + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", + 7: "CLONE_VOLUME", + 8: "PUBLISH_READONLY", + 9: "EXPAND_VOLUME", + 10: "LIST_VOLUMES_PUBLISHED_NODES", } + var ControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_VOLUME": 1, - "PUBLISH_UNPUBLISH_VOLUME": 2, - "LIST_VOLUMES": 3, - "GET_CAPACITY": 4, - "CREATE_DELETE_SNAPSHOT": 5, - "LIST_SNAPSHOTS": 6, - "CLONE_VOLUME": 7, - "PUBLISH_READONLY": 8, - "EXPAND_VOLUME": 9, + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, + "CLONE_VOLUME": 7, + "PUBLISH_READONLY": 8, + "EXPAND_VOLUME": 9, + "LIST_VOLUMES_PUBLISHED_NODES": 10, } func (x ControllerServiceCapability_RPC_Type) String() string { return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) } + func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{29, 0, 0} + return fileDescriptor_9cdb00adce470e01, []int{29, 0, 0} } type VolumeUsage_Unit int32 @@ -242,6 +256,7 @@ var VolumeUsage_Unit_name = map[int32]string{ 1: "BYTES", 2: "INODES", } + var VolumeUsage_Unit_value = map[string]int32{ "UNKNOWN": 0, "BYTES": 1, @@ -251,8 +266,9 @@ var VolumeUsage_Unit_value = map[string]int32{ func (x VolumeUsage_Unit) String() string { return proto.EnumName(VolumeUsage_Unit_name, int32(x)) } + func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{49, 0} + return fileDescriptor_9cdb00adce470e01, []int{49, 0} } type NodeServiceCapability_RPC_Type int32 @@ -274,6 +290,7 @@ var NodeServiceCapability_RPC_Type_name = map[int32]string{ 2: "GET_VOLUME_STATS", 3: "EXPAND_VOLUME", } + var NodeServiceCapability_RPC_Type_value = map[string]int32{ "UNKNOWN": 0, "STAGE_UNSTAGE_VOLUME": 1, @@ -284,8 +301,9 @@ var NodeServiceCapability_RPC_Type_value = map[string]int32{ func (x NodeServiceCapability_RPC_Type) String() string { return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) } + func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{52, 0, 0} + return fileDescriptor_9cdb00adce470e01, []int{52, 0, 0} 
} type GetPluginInfoRequest struct { @@ -298,16 +316,17 @@ func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } func (*GetPluginInfoRequest) ProtoMessage() {} func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{0} + return fileDescriptor_9cdb00adce470e01, []int{0} } + func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) } func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) } -func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) +func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(m, src) } func (m *GetPluginInfoRequest) XXX_Size() int { return xxx_messageInfo_GetPluginInfoRequest.Size(m) @@ -340,16 +359,17 @@ func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } func (*GetPluginInfoResponse) ProtoMessage() {} func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{1} + return fileDescriptor_9cdb00adce470e01, []int{1} } + func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) } func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) } -func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) +func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(m, src) } func (m *GetPluginInfoResponse) XXX_Size() int { return xxx_messageInfo_GetPluginInfoResponse.Size(m) @@ -391,16 +411,17 @@ func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilit func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } func (*GetPluginCapabilitiesRequest) ProtoMessage() {} func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{2} + return fileDescriptor_9cdb00adce470e01, []int{2} } + func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) } func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) } -func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) +func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src) } func (m *GetPluginCapabilitiesRequest) XXX_Size() int { return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) @@ -424,16 +445,17 @@ func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabili func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } func (*GetPluginCapabilitiesResponse) ProtoMessage() {} func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_csi_2c5455657a82ae49, []int{3} + return fileDescriptor_9cdb00adce470e01, []int{3} } + func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) } func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) } -func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) +func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src) } func (m *GetPluginCapabilitiesResponse) XXX_Size() int { return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) @@ -466,16 +488,17 @@ func (m *PluginCapability) Reset() { *m = PluginCapability{} } func (m *PluginCapability) String() string { return proto.CompactTextString(m) } func (*PluginCapability) ProtoMessage() {} func (*PluginCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{4} + return fileDescriptor_9cdb00adce470e01, []int{4} } + func (m *PluginCapability) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PluginCapability.Unmarshal(m, b) } func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) } -func (dst *PluginCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability.Merge(dst, src) +func (m *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(m, src) } func (m *PluginCapability) XXX_Size() int { return xxx_messageInfo_PluginCapability.Size(m) @@ -523,80 +546,14 @@ func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansio return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
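In this hunk the hand-written _PluginCapability_OneofMarshaler/Unmarshaler/Sizer helpers give way to a single XXX_OneofWrappers hook expected by newer protoc-gen-go output; the caller-facing oneof shape is unchanged. A minimal sketch of how a consumer still populates and reads the PluginCapability oneof, assuming the vendored import path github.com/container-storage-interface/spec/lib/go/csi and purely illustrative values (none of this code is part of the patch):

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// readPluginCapability is an illustrative sketch: the regenerated file keeps
// the same wrapper structs (PluginCapability_Service_,
// PluginCapability_VolumeExpansion_), so only the internal
// marshal/unmarshal/size plumbing moved into XXX_OneofWrappers.
func readPluginCapability() csi.PluginCapability_Service_Type {
	pc := &csi.PluginCapability{
		Type: &csi.PluginCapability_Service_{
			Service: &csi.PluginCapability_Service{
				Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
			},
		},
	}
	// The generated accessors still resolve the oneof for the caller.
	if svc := pc.GetService(); svc != nil {
		return svc.GetType()
	}
	return csi.PluginCapability_Service_UNKNOWN
}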
+func (*PluginCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*PluginCapability_Service_)(nil), (*PluginCapability_VolumeExpansion_)(nil), } } -func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Service); err != nil { - return err - } - case *PluginCapability_VolumeExpansion_: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.VolumeExpansion); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) - } - return nil -} - -func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*PluginCapability) - switch tag { - case 1: // type.service - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PluginCapability_Service) - err := b.DecodeMessage(msg) - m.Type = &PluginCapability_Service_{msg} - return true, err - case 2: // type.volume_expansion - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PluginCapability_VolumeExpansion) - err := b.DecodeMessage(msg) - m.Type = &PluginCapability_VolumeExpansion_{msg} - return true, err - default: - return false, nil - } -} - -func _PluginCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - s := proto.Size(x.Service) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *PluginCapability_VolumeExpansion_: - s := proto.Size(x.VolumeExpansion) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type PluginCapability_Service struct { Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -608,16 +565,17 @@ func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Servi func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } func (*PluginCapability_Service) ProtoMessage() {} func (*PluginCapability_Service) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{4, 0} + return fileDescriptor_9cdb00adce470e01, []int{4, 0} } + func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) } func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) } -func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_Service.Merge(dst, src) +func (m *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(m, src) } func (m *PluginCapability_Service) XXX_Size() int { return xxx_messageInfo_PluginCapability_Service.Size(m) @@ -646,16 +604,17 @@ func (m *PluginCapability_VolumeExpansion) Reset() { *m = PluginCapabili func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) } func (*PluginCapability_VolumeExpansion) ProtoMessage() {} func 
(*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{4, 1} + return fileDescriptor_9cdb00adce470e01, []int{4, 1} } + func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b) } func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic) } -func (dst *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(dst, src) +func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src) } func (m *PluginCapability_VolumeExpansion) XXX_Size() int { return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m) @@ -683,16 +642,17 @@ func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } func (*ProbeRequest) ProtoMessage() {} func (*ProbeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{5} + return fileDescriptor_9cdb00adce470e01, []int{5} } + func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) } func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) } -func (dst *ProbeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeRequest.Merge(dst, src) +func (m *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(m, src) } func (m *ProbeRequest) XXX_Size() int { return xxx_messageInfo_ProbeRequest.Size(m) @@ -734,16 +694,17 @@ func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } func (*ProbeResponse) ProtoMessage() {} func (*ProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{6} + return fileDescriptor_9cdb00adce470e01, []int{6} } + func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) } func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) } -func (dst *ProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeResponse.Merge(dst, src) +func (m *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(m, src) } func (m *ProbeResponse) XXX_Size() int { return xxx_messageInfo_ProbeResponse.Size(m) @@ -843,16 +804,17 @@ func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } func (*CreateVolumeRequest) ProtoMessage() {} func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{7} + return fileDescriptor_9cdb00adce470e01, []int{7} } + func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) } func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) } -func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) 
+func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(m, src) } func (m *CreateVolumeRequest) XXX_Size() int { return xxx_messageInfo_CreateVolumeRequest.Size(m) @@ -928,16 +890,17 @@ func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } func (*VolumeContentSource) ProtoMessage() {} func (*VolumeContentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{8} + return fileDescriptor_9cdb00adce470e01, []int{8} } + func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) } func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) } -func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource.Merge(dst, src) +func (m *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(m, src) } func (m *VolumeContentSource) XXX_Size() int { return xxx_messageInfo_VolumeContentSource.Size(m) @@ -985,80 +948,14 @@ func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
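The same wrapper pattern applies to VolumeContentSource in the next hunk: exactly one generated wrapper struct is assigned to the Type field of the oneof. A short sketch of a clone-from-snapshot request, where the helper name and the snapshot ID argument are hypothetical and not taken from the patch:

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// buildCloneRequest is an illustrative helper showing how a caller fills the
// oneof: either VolumeContentSource_Snapshot or VolumeContentSource_Volume,
// never both.
func buildCloneRequest(name, snapshotID string) *csi.CreateVolumeRequest {
	return &csi.CreateVolumeRequest{
		Name: name,
		VolumeContentSource: &csi.VolumeContentSource{
			Type: &csi.VolumeContentSource_Snapshot{
				Snapshot: &csi.VolumeContentSource_SnapshotSource{
					SnapshotId: snapshotID,
				},
			},
		},
	}
}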
+func (*VolumeContentSource) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*VolumeContentSource_Snapshot)(nil), (*VolumeContentSource_Volume)(nil), } } -func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Snapshot); err != nil { - return err - } - case *VolumeContentSource_Volume: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Volume); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) - } - return nil -} - -func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeContentSource) - switch tag { - case 1: // type.snapshot - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeContentSource_SnapshotSource) - err := b.DecodeMessage(msg) - m.Type = &VolumeContentSource_Snapshot{msg} - return true, err - case 2: // type.volume - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeContentSource_VolumeSource) - err := b.DecodeMessage(msg) - m.Type = &VolumeContentSource_Volume{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - s := proto.Size(x.Snapshot) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *VolumeContentSource_Volume: - s := proto.Size(x.Volume) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type VolumeContentSource_SnapshotSource struct { // Contains identity information for the existing source snapshot. // This field is REQUIRED. 
Plugin is REQUIRED to support creating @@ -1074,16 +971,17 @@ func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeConten func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{8, 0} + return fileDescriptor_9cdb00adce470e01, []int{8, 0} } + func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) } func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) } -func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) +func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src) } func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) @@ -1115,16 +1013,17 @@ func (m *VolumeContentSource_VolumeSource) Reset() { *m = VolumeContentS func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) } func (*VolumeContentSource_VolumeSource) ProtoMessage() {} func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{8, 1} + return fileDescriptor_9cdb00adce470e01, []int{8, 1} } + func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b) } func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic) } -func (dst *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(dst, src) +func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src) } func (m *VolumeContentSource_VolumeSource) XXX_Size() int { return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m) @@ -1156,16 +1055,17 @@ func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } func (*CreateVolumeResponse) ProtoMessage() {} func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{9} + return fileDescriptor_9cdb00adce470e01, []int{9} } + func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) } func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) } -func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) +func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(m, src) } func (m *CreateVolumeResponse) XXX_Size() int { return xxx_messageInfo_CreateVolumeResponse.Size(m) @@ -1203,16 +1103,17 @@ func (m *VolumeCapability) Reset() { *m = VolumeCapability{} 
} func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } func (*VolumeCapability) ProtoMessage() {} func (*VolumeCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{10} + return fileDescriptor_9cdb00adce470e01, []int{10} } + func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) } func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) } -func (dst *VolumeCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability.Merge(dst, src) +func (m *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(m, src) } func (m *VolumeCapability) XXX_Size() int { return xxx_messageInfo_VolumeCapability.Size(m) @@ -1267,80 +1168,14 @@ func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*VolumeCapability_Block)(nil), (*VolumeCapability_Mount)(nil), } } -func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Block); err != nil { - return err - } - case *VolumeCapability_Mount: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mount); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) - } - return nil -} - -func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeCapability) - switch tag { - case 1: // access_type.block - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_BlockVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Block{msg} - return true, err - case 2: // access_type.mount - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_MountVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Mount{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - s := proto.Size(x.Block) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *VolumeCapability_Mount: - s := proto.Size(x.Mount) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // Indicate that the volume will be accessed via the block device API. 
type VolumeCapability_BlockVolume struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1352,16 +1187,17 @@ func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_B func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } func (*VolumeCapability_BlockVolume) ProtoMessage() {} func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{10, 0} + return fileDescriptor_9cdb00adce470e01, []int{10, 0} } + func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) } func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) } -func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) +func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src) } func (m *VolumeCapability_BlockVolume) XXX_Size() int { return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) @@ -1392,16 +1228,17 @@ func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_M func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } func (*VolumeCapability_MountVolume) ProtoMessage() {} func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{10, 1} + return fileDescriptor_9cdb00adce470e01, []int{10, 1} } + func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) } func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) } -func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src) +func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src) } func (m *VolumeCapability_MountVolume) XXX_Size() int { return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) @@ -1439,16 +1276,17 @@ func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_Ac func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } func (*VolumeCapability_AccessMode) ProtoMessage() {} func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{10, 2} + return fileDescriptor_9cdb00adce470e01, []int{10, 2} } + func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) } func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) } -func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src) +func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src) } func (m *VolumeCapability_AccessMode) XXX_Size() int { return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) @@ -1487,16 +1325,17 @@ func (m *CapacityRange) Reset() { *m = 
CapacityRange{} } func (m *CapacityRange) String() string { return proto.CompactTextString(m) } func (*CapacityRange) ProtoMessage() {} func (*CapacityRange) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{11} + return fileDescriptor_9cdb00adce470e01, []int{11} } + func (m *CapacityRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CapacityRange.Unmarshal(m, b) } func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) } -func (dst *CapacityRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_CapacityRange.Merge(dst, src) +func (m *CapacityRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRange.Merge(m, src) } func (m *CapacityRange) XXX_Size() int { return xxx_messageInfo_CapacityRange.Size(m) @@ -1592,16 +1431,17 @@ func (m *Volume) Reset() { *m = Volume{} } func (m *Volume) String() string { return proto.CompactTextString(m) } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{12} + return fileDescriptor_9cdb00adce470e01, []int{12} } + func (m *Volume) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Volume.Unmarshal(m, b) } func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Volume.Marshal(b, m, deterministic) } -func (dst *Volume) XXX_Merge(src proto.Message) { - xxx_messageInfo_Volume.Merge(dst, src) +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) } func (m *Volume) XXX_Size() int { return xxx_messageInfo_Volume.Size(m) @@ -1784,16 +1624,17 @@ func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } func (*TopologyRequirement) ProtoMessage() {} func (*TopologyRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{13} + return fileDescriptor_9cdb00adce470e01, []int{13} } + func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) } func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) } -func (dst *TopologyRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologyRequirement.Merge(dst, src) +func (m *TopologyRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyRequirement.Merge(m, src) } func (m *TopologyRequirement) XXX_Size() int { return xxx_messageInfo_TopologyRequirement.Size(m) @@ -1858,16 +1699,17 @@ func (m *Topology) Reset() { *m = Topology{} } func (m *Topology) String() string { return proto.CompactTextString(m) } func (*Topology) ProtoMessage() {} func (*Topology) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{14} + return fileDescriptor_9cdb00adce470e01, []int{14} } + func (m *Topology) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Topology.Unmarshal(m, b) } func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Topology.Marshal(b, m, deterministic) } -func (dst *Topology) XXX_Merge(src proto.Message) { - xxx_messageInfo_Topology.Merge(dst, src) +func (m *Topology) XXX_Merge(src proto.Message) { + xxx_messageInfo_Topology.Merge(m, src) } func (m *Topology) XXX_Size() int { return xxx_messageInfo_Topology.Size(m) @@ -1902,16 
+1744,17 @@ func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } func (*DeleteVolumeRequest) ProtoMessage() {} func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{15} + return fileDescriptor_9cdb00adce470e01, []int{15} } + func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) } func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) } -func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) +func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(m, src) } func (m *DeleteVolumeRequest) XXX_Size() int { return xxx_messageInfo_DeleteVolumeRequest.Size(m) @@ -1946,16 +1789,17 @@ func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } func (*DeleteVolumeResponse) ProtoMessage() {} func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{16} + return fileDescriptor_9cdb00adce470e01, []int{16} } + func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) } func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) } -func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) +func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(m, src) } func (m *DeleteVolumeResponse) XXX_Size() int { return xxx_messageInfo_DeleteVolumeResponse.Size(m) @@ -2000,16 +1844,17 @@ func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublis func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } func (*ControllerPublishVolumeRequest) ProtoMessage() {} func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{17} + return fileDescriptor_9cdb00adce470e01, []int{17} } + func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) } func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) } -func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src) +func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src) } func (m *ControllerPublishVolumeRequest) XXX_Size() int { return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) @@ -2086,16 +1931,17 @@ func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPubli func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } func (*ControllerPublishVolumeResponse) ProtoMessage() {} func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_csi_2c5455657a82ae49, []int{18} + return fileDescriptor_9cdb00adce470e01, []int{18} } + func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) } func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) } -func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) +func (m *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src) } func (m *ControllerPublishVolumeResponse) XXX_Size() int { return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) @@ -2137,16 +1983,17 @@ func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpu func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{19} + return fileDescriptor_9cdb00adce470e01, []int{19} } + func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) } func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) } -func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) +func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src) } func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) @@ -2188,16 +2035,17 @@ func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnp func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{20} + return fileDescriptor_9cdb00adce470e01, []int{20} } + func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) } func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) } -func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) +func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src) } func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) @@ -2235,16 +2083,17 @@ func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolum func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { 
- return fileDescriptor_csi_2c5455657a82ae49, []int{21} + return fileDescriptor_9cdb00adce470e01, []int{21} } + func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) } func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) } -func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) +func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src) } func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) @@ -2313,16 +2162,17 @@ func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolu func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{22} + return fileDescriptor_9cdb00adce470e01, []int{22} } + func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) } func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) } -func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) +func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src) } func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) @@ -2370,16 +2220,17 @@ func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { } func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{22, 0} + return fileDescriptor_9cdb00adce470e01, []int{22, 0} } + func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b) } func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic) } -func (dst *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(dst, src) +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src) } func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int { return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m) @@ -2435,16 +2286,17 @@ func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } func (*ListVolumesRequest) ProtoMessage() {} func 
(*ListVolumesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{23} + return fileDescriptor_9cdb00adce470e01, []int{23} } + func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) } func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) } -func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesRequest.Merge(dst, src) +func (m *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(m, src) } func (m *ListVolumesRequest) XXX_Size() int { return xxx_messageInfo_ListVolumesRequest.Size(m) @@ -2487,16 +2339,17 @@ func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } func (*ListVolumesResponse) ProtoMessage() {} func (*ListVolumesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{24} + return fileDescriptor_9cdb00adce470e01, []int{24} } + func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) } func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) } -func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse.Merge(dst, src) +func (m *ListVolumesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse.Merge(m, src) } func (m *ListVolumesResponse) XXX_Size() int { return xxx_messageInfo_ListVolumesResponse.Size(m) @@ -2521,27 +2374,82 @@ func (m *ListVolumesResponse) GetNextToken() string { return "" } -type ListVolumesResponse_Entry struct { - Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` +type ListVolumesResponse_VolumeStatus struct { + // A list of all `node_id` of nodes that the volume in this entry + // is controller published on. + // This field is OPTIONAL. If it is not specified and the SP has + // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO + // MAY assume the volume is not controller published to any nodes. + // If the field is not specified and the SP does not have the + // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST + // not interpret this field. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. 
+ PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } +func (m *ListVolumesResponse_VolumeStatus) Reset() { *m = ListVolumesResponse_VolumeStatus{} } +func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_VolumeStatus) ProtoMessage() {} +func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{24, 0} +} + +func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo + +func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string { + if m != nil { + return m.PublishedNodeIds + } + return nil +} + +type ListVolumesResponse_Entry struct { + // This field is REQUIRED + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + // This field is OPTIONAL. This field MUST be specified if the + // LIST_VOLUMES_PUBLISHED_NODES controller capability is + // supported. 
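For the new LIST_VOLUMES_PUBLISHED_NODES capability, the per-entry status added in this hunk can be read through the generated accessors, which are nil-safe on missing fields. A hedged sketch of a caller indexing published nodes by volume ID; the helper name and map shape are illustrative only:

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// publishedNodesByVolume sketches how a caller that has confirmed the
// LIST_VOLUMES_PUBLISHED_NODES controller capability could consume the new
// ListVolumesResponse entry status. Entries without a status simply yield an
// empty node list, matching the spec wording that the field is OPTIONAL.
func publishedNodesByVolume(resp *csi.ListVolumesResponse) map[string][]string {
	out := make(map[string][]string)
	for _, entry := range resp.GetEntries() {
		vol := entry.GetVolume()
		if vol == nil {
			continue
		}
		// GetStatus and GetPublishedNodeIds return zero values on nil
		// receivers, so no explicit nil check is needed here.
		out[vol.GetVolumeId()] = entry.GetStatus().GetPublishedNodeIds()
	}
	return out
}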
+ Status *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } func (*ListVolumesResponse_Entry) ProtoMessage() {} func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{24, 0} + return fileDescriptor_9cdb00adce470e01, []int{24, 1} } + func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) } func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) } -func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) +func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src) } func (m *ListVolumesResponse_Entry) XXX_Size() int { return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) @@ -2559,6 +2467,13 @@ func (m *ListVolumesResponse_Entry) GetVolume() *Volume { return nil } +func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus { + if m != nil { + return m.Status + } + return nil +} + type GetCapacityRequest struct { // If specified, the Plugin SHALL report the capacity of the storage // that can be used to provision volumes that satisfy ALL of the @@ -2587,16 +2502,17 @@ func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } func (*GetCapacityRequest) ProtoMessage() {} func (*GetCapacityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{25} + return fileDescriptor_9cdb00adce470e01, []int{25} } + func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) } func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) } -func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityRequest.Merge(dst, src) +func (m *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(m, src) } func (m *GetCapacityRequest) XXX_Size() int { return xxx_messageInfo_GetCapacityRequest.Size(m) @@ -2645,16 +2561,17 @@ func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } func (*GetCapacityResponse) ProtoMessage() {} func (*GetCapacityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{26} + return fileDescriptor_9cdb00adce470e01, []int{26} } + func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) } func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) } -func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityResponse.Merge(dst, src) +func (m *GetCapacityResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(m, src) } func (m *GetCapacityResponse) XXX_Size() int { return xxx_messageInfo_GetCapacityResponse.Size(m) @@ -2682,16 +2599,17 @@ func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetC func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{27} + return fileDescriptor_9cdb00adce470e01, []int{27} } + func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) } func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) } -func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) +func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src) } func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) @@ -2715,16 +2633,17 @@ func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGet func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{28} + return fileDescriptor_9cdb00adce470e01, []int{28} } + func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) } func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) } -func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) +func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src) } func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) @@ -2756,16 +2675,17 @@ func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCa func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } func (*ControllerServiceCapability) ProtoMessage() {} func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{29} + return fileDescriptor_9cdb00adce470e01, []int{29} } + func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) } func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) } -func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) +func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ControllerServiceCapability.Merge(m, src) } func (m *ControllerServiceCapability) XXX_Size() int { return xxx_messageInfo_ControllerServiceCapability.Size(m) @@ -2800,61 +2720,13 @@ func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*ControllerServiceCapability_Rpc)(nil), } } -func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Rpc); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) - } - return nil -} - -func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ControllerServiceCapability) - switch tag { - case 1: // type.rpc - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ControllerServiceCapability_RPC) - err := b.DecodeMessage(msg) - m.Type = &ControllerServiceCapability_Rpc{msg} - return true, err - default: - return false, nil - } -} - -func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - s := proto.Size(x.Rpc) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type ControllerServiceCapability_RPC struct { Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2866,16 +2738,17 @@ func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServi func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } func (*ControllerServiceCapability_RPC) ProtoMessage() {} func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{29, 0} + return fileDescriptor_9cdb00adce470e01, []int{29, 0} } + func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) } func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) } -func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) +func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src) 
} func (m *ControllerServiceCapability_RPC) XXX_Size() int { return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) @@ -2928,16 +2801,17 @@ func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } func (*CreateSnapshotRequest) ProtoMessage() {} func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{30} + return fileDescriptor_9cdb00adce470e01, []int{30} } + func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) } func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) } -func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) +func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(m, src) } func (m *CreateSnapshotRequest) XXX_Size() int { return xxx_messageInfo_CreateSnapshotRequest.Size(m) @@ -2990,16 +2864,17 @@ func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } func (*CreateSnapshotResponse) ProtoMessage() {} func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{31} + return fileDescriptor_9cdb00adce470e01, []int{31} } + func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) } func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) } -func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) +func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(m, src) } func (m *CreateSnapshotResponse) XXX_Size() int { return xxx_messageInfo_CreateSnapshotResponse.Size(m) @@ -3057,16 +2932,17 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{32} + return fileDescriptor_9cdb00adce470e01, []int{32} } + func (m *Snapshot) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Snapshot.Unmarshal(m, b) } func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) } -func (dst *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(dst, src) +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) } func (m *Snapshot) XXX_Size() int { return xxx_messageInfo_Snapshot.Size(m) @@ -3129,16 +3005,17 @@ func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } func (*DeleteSnapshotRequest) ProtoMessage() {} func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{33} + return fileDescriptor_9cdb00adce470e01, []int{33} } + func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) } func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) } -func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) +func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src) } func (m *DeleteSnapshotRequest) XXX_Size() int { return xxx_messageInfo_DeleteSnapshotRequest.Size(m) @@ -3173,16 +3050,17 @@ func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } func (*DeleteSnapshotResponse) ProtoMessage() {} func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{34} + return fileDescriptor_9cdb00adce470e01, []int{34} } + func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) } func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) } -func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) +func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src) } func (m *DeleteSnapshotResponse) XXX_Size() int { return xxx_messageInfo_DeleteSnapshotResponse.Size(m) @@ -3218,26 +3096,31 @@ type ListSnapshotsRequest struct { // ListSnapshots will return with current snapshot information // and will not block if the snapshot is being processed after // it is cut. - SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete ListSnapshot request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } func (*ListSnapshotsRequest) ProtoMessage() {} func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{35} + return fileDescriptor_9cdb00adce470e01, []int{35} } + func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) } func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) } -func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) +func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) } func (m *ListSnapshotsRequest) XXX_Size() int { return xxx_messageInfo_ListSnapshotsRequest.Size(m) @@ -3276,6 +3159,13 @@ func (m *ListSnapshotsRequest) GetSnapshotId() string { return "" } +func (m *ListSnapshotsRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + type ListSnapshotsResponse struct { Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` // This token allows you to get the next page of entries for @@ -3294,16 +3184,17 @@ func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } func (*ListSnapshotsResponse) ProtoMessage() {} func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{36} + return fileDescriptor_9cdb00adce470e01, []int{36} } + func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) } func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) } -func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) +func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) } func (m *ListSnapshotsResponse) XXX_Size() int { return xxx_messageInfo_ListSnapshotsResponse.Size(m) @@ -3339,16 +3230,17 @@ func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsRespon func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } func (*ListSnapshotsResponse_Entry) ProtoMessage() {} func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{36, 0} + return fileDescriptor_9cdb00adce470e01, []int{36, 0} } + func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) } func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) } -func (dst *ListSnapshotsResponse_Entry) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) +func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src) } func (m *ListSnapshotsResponse_Entry) XXX_Size() int { return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) @@ -3374,7 +3266,15 @@ type ControllerExpandVolumeRequest struct { CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` // Secrets required by the plugin for expanding the volume. // This field is OPTIONAL. - Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is + // being used as a block device - the SP MAY set + // node_expansion_required to false in ControllerExpandVolumeResponse + // to skip invocation of NodeExpandVolume on the node by the CO. + // This is an OPTIONAL field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3384,16 +3284,17 @@ func (m *ControllerExpandVolumeRequest) Reset() { *m = ControllerExpandV func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) } func (*ControllerExpandVolumeRequest) ProtoMessage() {} func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{37} + return fileDescriptor_9cdb00adce470e01, []int{37} } + func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b) } func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic) } -func (dst *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerExpandVolumeRequest.Merge(dst, src) +func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src) } func (m *ControllerExpandVolumeRequest) XXX_Size() int { return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m) @@ -3425,6 +3326,13 @@ func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string { return nil } +func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + type ControllerExpandVolumeResponse struct { // Capacity of volume after expansion. This field is REQUIRED. 
CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` @@ -3441,16 +3349,17 @@ func (m *ControllerExpandVolumeResponse) Reset() { *m = ControllerExpand func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) } func (*ControllerExpandVolumeResponse) ProtoMessage() {} func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{38} + return fileDescriptor_9cdb00adce470e01, []int{38} } + func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b) } func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic) } -func (dst *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerExpandVolumeResponse.Merge(dst, src) +func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src) } func (m *ControllerExpandVolumeResponse) XXX_Size() int { return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m) @@ -3516,16 +3425,17 @@ func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } func (*NodeStageVolumeRequest) ProtoMessage() {} func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{39} + return fileDescriptor_9cdb00adce470e01, []int{39} } + func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) } func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) } -func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) +func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src) } func (m *NodeStageVolumeRequest) XXX_Size() int { return xxx_messageInfo_NodeStageVolumeRequest.Size(m) @@ -3588,16 +3498,17 @@ func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } func (*NodeStageVolumeResponse) ProtoMessage() {} func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{40} + return fileDescriptor_9cdb00adce470e01, []int{40} } + func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) } func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) } -func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) +func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src) } func (m *NodeStageVolumeResponse) XXX_Size() int { return xxx_messageInfo_NodeStageVolumeResponse.Size(m) @@ -3624,16 +3535,17 @@ func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeReque func (m *NodeUnstageVolumeRequest) String() 
string { return proto.CompactTextString(m) } func (*NodeUnstageVolumeRequest) ProtoMessage() {} func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{41} + return fileDescriptor_9cdb00adce470e01, []int{41} } + func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) } func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) } -func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) +func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src) } func (m *NodeUnstageVolumeRequest) XXX_Size() int { return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) @@ -3668,16 +3580,17 @@ func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResp func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } func (*NodeUnstageVolumeResponse) ProtoMessage() {} func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{42} + return fileDescriptor_9cdb00adce470e01, []int{42} } + func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) } func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) } -func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) +func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src) } func (m *NodeUnstageVolumeResponse) XXX_Size() int { return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) @@ -3742,16 +3655,17 @@ func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeReque func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } func (*NodePublishVolumeRequest) ProtoMessage() {} func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{43} + return fileDescriptor_9cdb00adce470e01, []int{43} } + func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) } func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) } -func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) +func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src) } func (m *NodePublishVolumeRequest) XXX_Size() int { return xxx_messageInfo_NodePublishVolumeRequest.Size(m) @@ -3828,16 +3742,17 @@ func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResp func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } func (*NodePublishVolumeResponse) ProtoMessage() {} func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{44} + return fileDescriptor_9cdb00adce470e01, []int{44} } + func (m 
*NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) } func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) } -func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) +func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src) } func (m *NodePublishVolumeResponse) XXX_Size() int { return xxx_messageInfo_NodePublishVolumeResponse.Size(m) @@ -3865,16 +3780,17 @@ func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeR func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } func (*NodeUnpublishVolumeRequest) ProtoMessage() {} func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{45} + return fileDescriptor_9cdb00adce470e01, []int{45} } + func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) } func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) } -func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) +func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src) } func (m *NodeUnpublishVolumeRequest) XXX_Size() int { return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) @@ -3909,16 +3825,17 @@ func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolume func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } func (*NodeUnpublishVolumeResponse) ProtoMessage() {} func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{46} + return fileDescriptor_9cdb00adce470e01, []int{46} } + func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) } func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) } -func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) +func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src) } func (m *NodeUnpublishVolumeResponse) XXX_Size() int { return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) @@ -3937,7 +3854,13 @@ type NodeGetVolumeStatsRequest struct { // It MUST be an absolute path in the root filesystem of // the process serving this request. // This is a REQUIRED field. - VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. 
+ // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3947,16 +3870,17 @@ func (m *NodeGetVolumeStatsRequest) Reset() { *m = NodeGetVolumeStatsReq func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) } func (*NodeGetVolumeStatsRequest) ProtoMessage() {} func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{47} + return fileDescriptor_9cdb00adce470e01, []int{47} } + func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b) } func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic) } -func (dst *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(dst, src) +func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src) } func (m *NodeGetVolumeStatsRequest) XXX_Size() int { return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m) @@ -3981,6 +3905,13 @@ func (m *NodeGetVolumeStatsRequest) GetVolumePath() string { return "" } +func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + type NodeGetVolumeStatsResponse struct { // This field is OPTIONAL. Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` @@ -3993,16 +3924,17 @@ func (m *NodeGetVolumeStatsResponse) Reset() { *m = NodeGetVolumeStatsRe func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) } func (*NodeGetVolumeStatsResponse) ProtoMessage() {} func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{48} + return fileDescriptor_9cdb00adce470e01, []int{48} } + func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b) } func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic) } -func (dst *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(dst, src) +func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src) } func (m *NodeGetVolumeStatsResponse) XXX_Size() int { return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m) @@ -4041,16 +3973,17 @@ func (m *VolumeUsage) Reset() { *m = VolumeUsage{} } func (m *VolumeUsage) String() string { return proto.CompactTextString(m) } func (*VolumeUsage) ProtoMessage() {} func (*VolumeUsage) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{49} + return fileDescriptor_9cdb00adce470e01, []int{49} } + func (m *VolumeUsage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VolumeUsage.Unmarshal(m, b) } func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic) } -func (dst *VolumeUsage) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_VolumeUsage.Merge(dst, src) +func (m *VolumeUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeUsage.Merge(m, src) } func (m *VolumeUsage) XXX_Size() int { return xxx_messageInfo_VolumeUsage.Size(m) @@ -4099,16 +4032,17 @@ func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesR func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } func (*NodeGetCapabilitiesRequest) ProtoMessage() {} func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{50} + return fileDescriptor_9cdb00adce470e01, []int{50} } + func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) } func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) } -func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) +func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src) } func (m *NodeGetCapabilitiesRequest) XXX_Size() int { return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) @@ -4132,16 +4066,17 @@ func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilities func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } func (*NodeGetCapabilitiesResponse) ProtoMessage() {} func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{51} + return fileDescriptor_9cdb00adce470e01, []int{51} } + func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) } func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) } -func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) +func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src) } func (m *NodeGetCapabilitiesResponse) XXX_Size() int { return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) @@ -4173,16 +4108,17 @@ func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } func (*NodeServiceCapability) ProtoMessage() {} func (*NodeServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{52} + return fileDescriptor_9cdb00adce470e01, []int{52} } + func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) } func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) } -func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability.Merge(dst, src) +func (m *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(m, src) } func (m *NodeServiceCapability) XXX_Size() int { return xxx_messageInfo_NodeServiceCapability.Size(m) @@ -4217,61 +4153,13 @@ func (m *NodeServiceCapability) 
GetRpc() *NodeServiceCapability_RPC { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*NodeServiceCapability_Rpc)(nil), } } -func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*NodeServiceCapability) - // type - switch x := m.Type.(type) { - case *NodeServiceCapability_Rpc: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Rpc); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) - } - return nil -} - -func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*NodeServiceCapability) - switch tag { - case 1: // type.rpc - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NodeServiceCapability_RPC) - err := b.DecodeMessage(msg) - m.Type = &NodeServiceCapability_Rpc{msg} - return true, err - default: - return false, nil - } -} - -func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*NodeServiceCapability) - // type - switch x := m.Type.(type) { - case *NodeServiceCapability_Rpc: - s := proto.Size(x.Rpc) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type NodeServiceCapability_RPC struct { Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -4283,16 +4171,17 @@ func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } func (*NodeServiceCapability_RPC) ProtoMessage() {} func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{52, 0} + return fileDescriptor_9cdb00adce470e01, []int{52, 0} } + func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) } func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) } -func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src) +func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src) } func (m *NodeServiceCapability_RPC) XXX_Size() int { return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) @@ -4320,16 +4209,17 @@ func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } func (*NodeGetInfoRequest) ProtoMessage() {} func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_csi_2c5455657a82ae49, []int{53} + return fileDescriptor_9cdb00adce470e01, []int{53} } + func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) } func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) } -func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src) +func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoRequest.Merge(m, src) } func (m *NodeGetInfoRequest) XXX_Size() int { return xxx_messageInfo_NodeGetInfoRequest.Size(m) @@ -4383,16 +4273,17 @@ func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } func (*NodeGetInfoResponse) ProtoMessage() {} func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{54} + return fileDescriptor_9cdb00adce470e01, []int{54} } + func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) } func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) } -func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src) +func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(m, src) } func (m *NodeGetInfoResponse) XXX_Size() int { return xxx_messageInfo_NodeGetInfoResponse.Size(m) @@ -4435,26 +4326,43 @@ type NodeExpandVolumeRequest struct { // capacity to which the volume can be expanded. In such cases a // plugin MAY expand the volume to its maximum capacity. // This field is OPTIONAL. - CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,4,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. 
+ VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *NodeExpandVolumeRequest) Reset() { *m = NodeExpandVolumeRequest{} } func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) } func (*NodeExpandVolumeRequest) ProtoMessage() {} func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{55} + return fileDescriptor_9cdb00adce470e01, []int{55} } + func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b) } func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic) } -func (dst *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeExpandVolumeRequest.Merge(dst, src) +func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src) } func (m *NodeExpandVolumeRequest) XXX_Size() int { return xxx_messageInfo_NodeExpandVolumeRequest.Size(m) @@ -4486,6 +4394,20 @@ func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange { return nil } +func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + type NodeExpandVolumeResponse struct { // The capacity of the volume in bytes. This field is OPTIONAL. 
CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` @@ -4498,16 +4420,17 @@ func (m *NodeExpandVolumeResponse) Reset() { *m = NodeExpandVolumeRespon func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) } func (*NodeExpandVolumeResponse) ProtoMessage() {} func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_2c5455657a82ae49, []int{56} + return fileDescriptor_9cdb00adce470e01, []int{56} } + func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b) } func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic) } -func (dst *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeExpandVolumeResponse.Merge(dst, src) +func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src) } func (m *NodeExpandVolumeResponse) XXX_Size() int { return xxx_messageInfo_NodeExpandVolumeResponse.Size(m) @@ -4530,11 +4453,17 @@ var E_CsiSecret = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 1059, Name: "csi.v1.csi_secret", - Tag: "varint,1059,opt,name=csi_secret,json=csiSecret", + Tag: "varint,1059,opt,name=csi_secret", Filename: "github.com/container-storage-interface/spec/csi.proto", } func init() { + proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value) + proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) + proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest") proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse") proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry") @@ -4583,6 +4512,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry") proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest") proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus") proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry") proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest") proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry") @@ -4600,6 +4530,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry") proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse") proto.RegisterType((*ListSnapshotsRequest)(nil), 
"csi.v1.ListSnapshotsRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry") proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse") proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry") proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest") @@ -4630,15 +4561,228 @@ func init() { proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse") proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest") proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse") - proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) - proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value) - proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) - proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) - proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) - proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) proto.RegisterExtension(E_CsiSecret) } +func init() { + proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01) +} + +var fileDescriptor_9cdb00adce470e01 = []byte{ + // 3366 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x70, 0xdb, 0xc6, + 0xf5, 0x27, 0xf8, 0x25, 0xea, 0xe9, 0xc3, 0xf4, 0xea, 0xc3, 0x34, 0x24, 0xd9, 0x32, 0x1c, 0x3b, + 0xb2, 0x63, 0xd3, 0xff, 0x28, 0x71, 0xe6, 0x1f, 0x5b, 0x69, 0x43, 0x51, 0xb4, 0xc4, 0x98, 0xa6, + 0x14, 0x90, 0x92, 0x63, 0xb7, 0x19, 0x04, 0x22, 0x57, 0x34, 0x26, 0x24, 0xc0, 0x00, 0xa0, 0x2a, + 0xf5, 0xd2, 0x99, 0xf6, 0xd4, 0x69, 0xef, 0x6d, 0x4f, 0x9d, 0x49, 0x6f, 0x6d, 0x33, 0x39, 0x75, + 0x7a, 0xec, 0x4c, 0x0f, 0x3d, 0xf4, 0xd0, 0xe9, 0xad, 0x9d, 0x5e, 0x72, 0xed, 0x64, 0xda, 0x99, + 0x4c, 0x8f, 0x9d, 0x1e, 0x3a, 0xc0, 0x2e, 0x40, 0x2c, 0x08, 0x80, 0xa4, 0x65, 0x4f, 0x0e, 0x3d, + 0x49, 0x7c, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, 0xef, 0xbd, 0x7d, 0xef, 0xb7, 0x80, 0xbb, 0x2d, 0xc5, + 0x7c, 0xd6, 0x3b, 0xcc, 0x37, 0xb4, 0xce, 0x9d, 0x86, 0xa6, 0x9a, 0xb2, 0xa2, 0x62, 0xfd, 0xb6, + 0x61, 0x6a, 0xba, 0xdc, 0xc2, 0xb7, 0x15, 0xd5, 0xc4, 0xfa, 0x91, 0xdc, 0xc0, 0x77, 0x8c, 0x2e, + 0x6e, 0xdc, 0x69, 0x18, 0x4a, 0xbe, 0xab, 0x6b, 0xa6, 0x86, 0xd2, 0xd6, 0xbf, 0xc7, 0xaf, 0xf3, + 0xab, 0x2d, 0x4d, 0x6b, 0xb5, 0xf1, 0x1d, 0x9b, 0x7a, 0xd8, 0x3b, 0xba, 0xd3, 0xc4, 0x46, 0x43, + 0x57, 0xba, 0xa6, 0xa6, 0x13, 0x4e, 0xfe, 0xb2, 0x9f, 0xc3, 0x54, 0x3a, 0xd8, 0x30, 0xe5, 0x4e, + 0x97, 0x32, 0x5c, 0xf2, 0x33, 0x7c, 0x47, 0x97, 0xbb, 0x5d, 0xac, 0x1b, 0x64, 0x5c, 0x58, 0x84, + 0xf9, 0x6d, 0x6c, 0xee, 0xb5, 0x7b, 0x2d, 0x45, 0x2d, 0xab, 0x47, 0x9a, 0x88, 0x3f, 0xe9, 0x61, + 0xc3, 0x14, 0xfe, 0xca, 0xc1, 0x82, 0x6f, 0xc0, 0xe8, 0x6a, 0xaa, 0x81, 0x11, 0x82, 0xa4, 0x2a, + 0x77, 0x70, 0x8e, 0x5b, 0xe5, 0xd6, 0x26, 0x45, 0xfb, 0x7f, 0x74, 0x0d, 0x66, 0x8f, 0xb1, 0xda, + 0xd4, 0x74, 0xe9, 0x18, 0xeb, 0x86, 0xa2, 0xa9, 0xb9, 0xb8, 0x3d, 0x3a, 0x43, 0xa8, 0x07, 0x84, + 0x88, 
0xb6, 0x21, 0xd3, 0x91, 0x55, 0xe5, 0x08, 0x1b, 0x66, 0x2e, 0xb1, 0x9a, 0x58, 0x9b, 0x5a, + 0x7f, 0x2d, 0x4f, 0xb6, 0x9a, 0x0f, 0x5c, 0x2b, 0xff, 0x88, 0x72, 0x97, 0x54, 0x53, 0x3f, 0x15, + 0xdd, 0xc9, 0xfc, 0x7d, 0x98, 0x61, 0x86, 0x50, 0x16, 0x12, 0x1f, 0xe3, 0x53, 0xaa, 0x93, 0xf5, + 0x2f, 0x9a, 0x87, 0xd4, 0xb1, 0xdc, 0xee, 0x61, 0xaa, 0x09, 0xf9, 0x71, 0x2f, 0xfe, 0xff, 0x9c, + 0x70, 0x09, 0x96, 0xdd, 0xd5, 0x8a, 0x72, 0x57, 0x3e, 0x54, 0xda, 0x8a, 0xa9, 0x60, 0xc3, 0xd9, + 0xfa, 0x87, 0xb0, 0x12, 0x32, 0x4e, 0x2d, 0xb0, 0x01, 0xd3, 0x0d, 0x0f, 0x3d, 0xc7, 0xd9, 0x5b, + 0xc9, 0x39, 0x5b, 0xf1, 0xcd, 0x3c, 0x15, 0x19, 0x6e, 0xe1, 0x4f, 0x09, 0xc8, 0xfa, 0x59, 0xd0, + 0x06, 0x4c, 0x18, 0x58, 0x3f, 0x56, 0x1a, 0xc4, 0xae, 0x53, 0xeb, 0xab, 0x61, 0xd2, 0xf2, 0x35, + 0xc2, 0xb7, 0x13, 0x13, 0x9d, 0x29, 0x68, 0x1f, 0xb2, 0xc7, 0x5a, 0xbb, 0xd7, 0xc1, 0x12, 0x3e, + 0xe9, 0xca, 0xaa, 0x7b, 0x00, 0x53, 0xeb, 0x6b, 0xa1, 0x62, 0x0e, 0xec, 0x09, 0x25, 0x87, 0x7f, + 0x27, 0x26, 0x9e, 0x3b, 0x66, 0x49, 0xfc, 0x4f, 0x38, 0x98, 0xa0, 0xab, 0xa1, 0xb7, 0x21, 0x69, + 0x9e, 0x76, 0x89, 0x76, 0xb3, 0xeb, 0xd7, 0x86, 0x69, 0x97, 0xaf, 0x9f, 0x76, 0xb1, 0x68, 0x4f, + 0x11, 0xde, 0x87, 0xa4, 0xf5, 0x0b, 0x4d, 0xc1, 0xc4, 0x7e, 0xf5, 0x61, 0x75, 0xf7, 0x71, 0x35, + 0x1b, 0x43, 0x8b, 0x80, 0x8a, 0xbb, 0xd5, 0xba, 0xb8, 0x5b, 0xa9, 0x94, 0x44, 0xa9, 0x56, 0x12, + 0x0f, 0xca, 0xc5, 0x52, 0x96, 0x43, 0xaf, 0xc0, 0xea, 0xc1, 0x6e, 0x65, 0xff, 0x51, 0x49, 0x2a, + 0x14, 0x8b, 0xa5, 0x5a, 0xad, 0xbc, 0x59, 0xae, 0x94, 0xeb, 0x4f, 0xa4, 0xe2, 0x6e, 0xb5, 0x56, + 0x17, 0x0b, 0xe5, 0x6a, 0xbd, 0x96, 0x8d, 0xf3, 0xdf, 0xe7, 0xe0, 0x9c, 0x6f, 0x03, 0xa8, 0xc0, + 0x68, 0x78, 0x7b, 0xd4, 0x8d, 0x7b, 0x35, 0xbd, 0x15, 0xa4, 0x29, 0x40, 0x7a, 0xb7, 0x5a, 0x29, + 0x57, 0x2d, 0xed, 0xa6, 0x60, 0x62, 0xf7, 0xc1, 0x03, 0xfb, 0x47, 0x7c, 0x33, 0x4d, 0x16, 0x14, + 0x66, 0x61, 0x7a, 0x4f, 0xd7, 0x0e, 0xb1, 0xe3, 0x3f, 0x05, 0x98, 0xa1, 0xbf, 0xa9, 0xbf, 0xfc, + 0x1f, 0xa4, 0x74, 0x2c, 0x37, 0x4f, 0xe9, 0xd1, 0xf2, 0x79, 0x12, 0x93, 0x79, 0x27, 0x26, 0xf3, + 0x9b, 0x9a, 0xd6, 0x3e, 0xb0, 0xfc, 0x53, 0x24, 0x8c, 0xc2, 0x57, 0x49, 0x98, 0x2b, 0xea, 0x58, + 0x36, 0x31, 0xd1, 0x96, 0x8a, 0x0e, 0x8c, 0xbd, 0x0d, 0x98, 0xb5, 0xfc, 0xab, 0xa1, 0x98, 0xa7, + 0x92, 0x2e, 0xab, 0x2d, 0x4c, 0x8f, 0x7e, 0xc1, 0xb1, 0x40, 0x91, 0x8e, 0x8a, 0xd6, 0xa0, 0x38, + 0xd3, 0xf0, 0xfe, 0x44, 0x65, 0x98, 0xa3, 0xae, 0xc3, 0xb8, 0x74, 0x82, 0x75, 0x69, 0xa2, 0x85, + 0xc7, 0xa5, 0xd1, 0x31, 0x4b, 0x51, 0xb0, 0x81, 0x1e, 0x02, 0x74, 0x65, 0x5d, 0xee, 0x60, 0x13, + 0xeb, 0x46, 0x2e, 0xc9, 0xc6, 0x77, 0xc0, 0x6e, 0xf2, 0x7b, 0x2e, 0x37, 0x89, 0x6f, 0xcf, 0x74, + 0xb4, 0x6d, 0x05, 0x44, 0x43, 0xc7, 0xa6, 0x91, 0x4b, 0xd9, 0x92, 0xd6, 0xa2, 0x24, 0xd5, 0x08, + 0xab, 0x2d, 0x66, 0x33, 0xf1, 0xd3, 0x4d, 0x4e, 0x74, 0x66, 0xa3, 0x5d, 0x58, 0x70, 0x36, 0xa8, + 0xa9, 0x26, 0x56, 0x4d, 0xc9, 0xd0, 0x7a, 0x7a, 0x03, 0xe7, 0xd2, 0xb6, 0x95, 0x96, 0x7c, 0x5b, + 0x24, 0x3c, 0x35, 0x9b, 0x45, 0xa4, 0xa6, 0x61, 0x88, 0xe8, 0x29, 0xf0, 0x72, 0xa3, 0x81, 0x0d, + 0x43, 0x21, 0xb6, 0x90, 0x74, 0xfc, 0x49, 0x4f, 0xd1, 0x71, 0x07, 0xab, 0xa6, 0x91, 0x9b, 0x60, + 0xa5, 0xd6, 0xb5, 0xae, 0xd6, 0xd6, 0x5a, 0xa7, 0x62, 0x9f, 0x47, 0xbc, 0xc8, 0x4c, 0xf7, 0x8c, + 0x18, 0xfc, 0x3b, 0x70, 0xce, 0x67, 0x94, 0x71, 0x32, 0x1b, 0x7f, 0x0f, 0xa6, 0xbd, 0x96, 0x18, + 0x2b, 0x2b, 0xfe, 0x28, 0x0e, 0x73, 0x01, 0x36, 0x40, 0x3b, 0x90, 0x31, 0x54, 0xb9, 0x6b, 0x3c, + 0xd3, 0x4c, 0xea, 0xbf, 0x37, 0x23, 0x4c, 0x96, 0xaf, 0x51, 0x5e, 0xf2, 0x73, 0x27, 0x26, 0xba, + 0xb3, 0xd1, 0x26, 0xa4, 0x89, 
0x3d, 0xfd, 0xb9, 0x29, 0x48, 0x0e, 0xa1, 0xb9, 0x52, 0xe8, 0x4c, + 0xfe, 0x75, 0x98, 0x65, 0x57, 0x40, 0x97, 0x61, 0xca, 0x59, 0x41, 0x52, 0x9a, 0x74, 0xaf, 0xe0, + 0x90, 0xca, 0x4d, 0xfe, 0x35, 0x98, 0xf6, 0x0a, 0x43, 0x4b, 0x30, 0x49, 0x1d, 0xc2, 0x65, 0xcf, + 0x10, 0x42, 0xb9, 0xe9, 0xc6, 0xf4, 0x37, 0x60, 0x9e, 0xf5, 0x33, 0x1a, 0xca, 0xd7, 0xdd, 0x3d, + 0x10, 0x5b, 0xcc, 0xb2, 0x7b, 0x70, 0xf4, 0x14, 0x7e, 0x99, 0x84, 0xac, 0x3f, 0x68, 0xd0, 0x06, + 0xa4, 0x0e, 0xdb, 0x5a, 0xe3, 0x63, 0x3a, 0xf7, 0x95, 0xb0, 0xe8, 0xca, 0x6f, 0x5a, 0x5c, 0x84, + 0xba, 0x13, 0x13, 0xc9, 0x24, 0x6b, 0x76, 0x47, 0xeb, 0xa9, 0x26, 0xb5, 0x5e, 0xf8, 0xec, 0x47, + 0x16, 0x57, 0x7f, 0xb6, 0x3d, 0x09, 0x6d, 0xc1, 0x14, 0x71, 0x3b, 0xa9, 0xa3, 0x35, 0x71, 0x2e, + 0x61, 0xcb, 0xb8, 0x1a, 0x2a, 0xa3, 0x60, 0xf3, 0x3e, 0xd2, 0x9a, 0x58, 0x04, 0xd9, 0xfd, 0x9f, + 0x9f, 0x81, 0x29, 0x8f, 0x6e, 0xfc, 0x36, 0x4c, 0x79, 0x16, 0x43, 0x17, 0x60, 0xe2, 0xc8, 0x90, + 0xdc, 0x24, 0x3c, 0x29, 0xa6, 0x8f, 0x0c, 0x3b, 0x9f, 0x5e, 0x86, 0x29, 0x5b, 0x0b, 0xe9, 0xa8, + 0x2d, 0xb7, 0x8c, 0x5c, 0x7c, 0x35, 0x61, 0x9d, 0x91, 0x4d, 0x7a, 0x60, 0x51, 0xf8, 0x7f, 0x70, + 0x00, 0xfd, 0x25, 0xd1, 0x06, 0x24, 0x6d, 0x2d, 0x49, 0x2a, 0x5f, 0x1b, 0x41, 0xcb, 0xbc, 0xad, + 0xaa, 0x3d, 0x4b, 0xf8, 0x39, 0x07, 0x49, 0x5b, 0x8c, 0xff, 0xc2, 0xa9, 0x95, 0xab, 0xdb, 0x95, + 0x92, 0x54, 0xdd, 0xdd, 0x2a, 0x49, 0x8f, 0xc5, 0x72, 0xbd, 0x24, 0x66, 0x39, 0xb4, 0x04, 0x17, + 0xbc, 0x74, 0xb1, 0x54, 0xd8, 0x2a, 0x89, 0xd2, 0x6e, 0xb5, 0xf2, 0x24, 0x1b, 0x47, 0x3c, 0x2c, + 0x3e, 0xda, 0xaf, 0xd4, 0xcb, 0x83, 0x63, 0x09, 0xb4, 0x0c, 0x39, 0xcf, 0x18, 0x95, 0x41, 0xc5, + 0x26, 0x2d, 0xb1, 0x9e, 0x51, 0xf2, 0x2f, 0x1d, 0x4c, 0x6d, 0xce, 0xb8, 0x87, 0x61, 0x3b, 0xdb, + 0x63, 0x98, 0x61, 0x72, 0xb4, 0x55, 0x4e, 0xd1, 0xa4, 0xd2, 0x94, 0x0e, 0x4f, 0x4d, 0xbb, 0xc4, + 0xe0, 0xd6, 0x12, 0xe2, 0x8c, 0x43, 0xdd, 0xb4, 0x88, 0x96, 0x59, 0xdb, 0x4a, 0x47, 0x31, 0x29, + 0x4f, 0xdc, 0xe6, 0x01, 0x9b, 0x64, 0x33, 0x08, 0x5f, 0xc4, 0x21, 0x4d, 0xcf, 0xe6, 0x9a, 0xe7, + 0x96, 0x60, 0x44, 0x3a, 0x54, 0x22, 0x92, 0x09, 0x8e, 0x38, 0x1b, 0x1c, 0x68, 0x07, 0x66, 0xbd, + 0xa9, 0xf4, 0xc4, 0x29, 0xe2, 0xae, 0xb0, 0x07, 0xe4, 0x8d, 0xe7, 0x13, 0x5a, 0xba, 0xcd, 0x1c, + 0x7b, 0x69, 0x68, 0x13, 0x66, 0x7d, 0xd9, 0x38, 0x39, 0x3c, 0x1b, 0xcf, 0x34, 0x98, 0xc4, 0x54, + 0x80, 0x39, 0x27, 0x91, 0xb6, 0xb1, 0x64, 0xd2, 0x44, 0x4b, 0x6f, 0x8b, 0xec, 0x40, 0x02, 0x46, + 0x7d, 0x66, 0x87, 0xc6, 0xbf, 0x0b, 0x68, 0x50, 0xd7, 0xb1, 0xb2, 0x66, 0x0f, 0xe6, 0x02, 0x52, + 0x3c, 0xca, 0xc3, 0xa4, 0x7d, 0x54, 0x86, 0x62, 0x62, 0x5a, 0x1e, 0x0e, 0x6a, 0xd4, 0x67, 0xb1, + 0xf8, 0xbb, 0x3a, 0x3e, 0xc2, 0xba, 0x8e, 0x9b, 0x76, 0x78, 0x04, 0xf2, 0xbb, 0x2c, 0xc2, 0x0f, + 0x38, 0xc8, 0x38, 0x74, 0x74, 0x0f, 0x32, 0x06, 0x6e, 0x91, 0xeb, 0x87, 0xac, 0x75, 0xc9, 0x3f, + 0x37, 0x5f, 0xa3, 0x0c, 0xb4, 0x90, 0x76, 0xf8, 0xad, 0x42, 0x9a, 0x19, 0x1a, 0x6b, 0xf3, 0xbf, + 0xe5, 0x60, 0x6e, 0x0b, 0xb7, 0xb1, 0xbf, 0x4a, 0x89, 0xca, 0xb0, 0xde, 0x8b, 0x3d, 0xce, 0x5e, + 0xec, 0x01, 0xa2, 0x22, 0x2e, 0xf6, 0x33, 0x5d, 0x76, 0x8b, 0x30, 0xcf, 0xae, 0x46, 0xd2, 0xbb, + 0xf0, 0xcf, 0x04, 0x5c, 0xb2, 0x7c, 0x41, 0xd7, 0xda, 0x6d, 0xac, 0xef, 0xf5, 0x0e, 0xdb, 0x8a, + 0xf1, 0x6c, 0x8c, 0xcd, 0x5d, 0x80, 0x09, 0x55, 0x6b, 0x7a, 0x82, 0x27, 0x6d, 0xfd, 0x2c, 0x37, + 0x51, 0x09, 0xce, 0xfb, 0xcb, 0xac, 0x53, 0x9a, 0x84, 0xc3, 0x8b, 0xac, 0xec, 0xb1, 0xff, 0x06, + 0xe1, 0x21, 0x63, 0x15, 0x88, 0x9a, 0xda, 0x3e, 0xb5, 0x23, 0x26, 0x23, 0xba, 0xbf, 0x91, 0xe8, + 0xaf, 0x98, 0xde, 0x70, 0x2b, 0xa6, 0xc8, 0x1d, 0x45, 
0x15, 0x4f, 0x1f, 0x0d, 0x44, 0x7c, 0xda, + 0x16, 0xfd, 0xf6, 0x88, 0xa2, 0x87, 0x66, 0x82, 0xb3, 0x9c, 0xe2, 0x0b, 0x08, 0xdf, 0x3f, 0x72, + 0x70, 0x39, 0x74, 0x0b, 0xf4, 0xca, 0x6f, 0xc2, 0xb9, 0x2e, 0x19, 0x70, 0x8d, 0x40, 0xa2, 0xec, + 0xfe, 0x50, 0x23, 0xd0, 0x2e, 0x96, 0x52, 0x19, 0x33, 0xcc, 0x76, 0x19, 0x22, 0x5f, 0x80, 0xb9, + 0x00, 0xb6, 0xb1, 0x36, 0xf3, 0x25, 0x07, 0xab, 0x7d, 0x55, 0xf6, 0xd5, 0xee, 0x8b, 0x73, 0xdf, + 0x7a, 0xdf, 0xb7, 0x48, 0xca, 0xbf, 0x3b, 0xb8, 0xf7, 0xe0, 0x05, 0x5f, 0x56, 0x04, 0x5f, 0x85, + 0x2b, 0x11, 0x4b, 0xd3, 0x70, 0xfe, 0x22, 0x09, 0x57, 0x0e, 0xe4, 0xb6, 0xd2, 0x74, 0x0b, 0xb9, + 0x80, 0x7e, 0x3f, 0xda, 0x24, 0x8d, 0x81, 0x08, 0x20, 0x59, 0x6b, 0xc3, 0x8d, 0xda, 0x61, 0xf2, + 0x47, 0xb8, 0x0e, 0x5f, 0x60, 0x13, 0xf6, 0x24, 0xa0, 0x09, 0x7b, 0x7b, 0x74, 0x5d, 0xa3, 0x5a, + 0xb2, 0x7d, 0x7f, 0x82, 0x79, 0x6b, 0x74, 0xb9, 0x11, 0x5e, 0x70, 0xe6, 0x28, 0xfe, 0x3a, 0xbb, + 0xa6, 0xdf, 0x27, 0x41, 0x88, 0xda, 0x3d, 0xcd, 0x21, 0x22, 0x4c, 0x36, 0x34, 0xf5, 0x48, 0xd1, + 0x3b, 0xb8, 0x49, 0xab, 0xff, 0x37, 0x47, 0x31, 0x1e, 0x4d, 0x20, 0x45, 0x67, 0xae, 0xd8, 0x17, + 0x83, 0x72, 0x30, 0xd1, 0xc1, 0x86, 0x21, 0xb7, 0x1c, 0xb5, 0x9c, 0x9f, 0xfc, 0x67, 0x09, 0x98, + 0x74, 0xa7, 0x20, 0x75, 0xc0, 0x83, 0x49, 0xfa, 0xda, 0x7e, 0x1e, 0x05, 0x9e, 0xdf, 0x99, 0xe3, + 0xcf, 0xe1, 0xcc, 0x4d, 0xc6, 0x99, 0x49, 0x38, 0x6c, 0x3d, 0x97, 0xda, 0x11, 0x7e, 0xfd, 0xb5, + 0x3b, 0xa0, 0xf0, 0x6d, 0x40, 0x15, 0xc5, 0xa0, 0x5d, 0x94, 0x9b, 0x96, 0xac, 0xa6, 0x49, 0x3e, + 0x91, 0xb0, 0x6a, 0xea, 0x0a, 0x2d, 0xd7, 0x53, 0x22, 0x74, 0xe4, 0x93, 0x12, 0xa1, 0x58, 0x25, + 0xbd, 0x61, 0xca, 0xba, 0xa9, 0xa8, 0x2d, 0xc9, 0xd4, 0x3e, 0xc6, 0x2e, 0xe8, 0xea, 0x50, 0xeb, + 0x16, 0x51, 0xf8, 0x34, 0x0e, 0x73, 0x8c, 0x78, 0xea, 0x93, 0xf7, 0x61, 0xa2, 0x2f, 0x9b, 0x29, + 0xe3, 0x03, 0xb8, 0xf3, 0xc4, 0x6c, 0xce, 0x0c, 0xb4, 0x02, 0xa0, 0xe2, 0x13, 0x93, 0x59, 0x77, + 0xd2, 0xa2, 0xd8, 0x6b, 0xf2, 0x1b, 0x6e, 0xcf, 0x6d, 0xca, 0x66, 0xcf, 0x40, 0xb7, 0x00, 0xd1, + 0x0c, 0x8d, 0x9b, 0x12, 0xbd, 0x62, 0xc8, 0xb2, 0x93, 0x62, 0xd6, 0x1d, 0xa9, 0xda, 0x97, 0x8d, + 0xc1, 0x7f, 0x02, 0x29, 0x62, 0xc4, 0x11, 0xbb, 0x6d, 0xf4, 0x2e, 0xa4, 0x0d, 0x7b, 0x21, 0x3f, + 0xb2, 0x10, 0xb4, 0x13, 0xaf, 0x62, 0x22, 0x9d, 0x27, 0x7c, 0x16, 0x07, 0xb4, 0x8d, 0x4d, 0xb7, + 0x0d, 0xa3, 0x67, 0x10, 0xe2, 0xcb, 0xdc, 0x73, 0xf8, 0xf2, 0x7b, 0x8c, 0x2f, 0x93, 0x68, 0xb8, + 0xe9, 0x41, 0xbf, 0x7d, 0x4b, 0x47, 0x66, 0xe2, 0x90, 0xd6, 0x87, 0xd4, 0x93, 0xa3, 0xb5, 0x3e, + 0x67, 0x74, 0xd9, 0x2d, 0x98, 0x63, 0x74, 0xa6, 0x3e, 0x75, 0x1b, 0x90, 0x7c, 0x2c, 0x2b, 0x6d, + 0xd9, 0xd2, 0xcb, 0xe9, 0x2c, 0x69, 0xa7, 0x79, 0xde, 0x1d, 0x71, 0xa6, 0x09, 0x82, 0xb7, 0x60, + 0xa1, 0xf2, 0xfc, 0x68, 0x7c, 0xdb, 0x7b, 0xd1, 0x0f, 0xf0, 0xd0, 0x75, 0xb7, 0x03, 0x11, 0xf9, + 0xab, 0x83, 0x45, 0x0a, 0x85, 0xa7, 0x43, 0xc1, 0xf9, 0x5f, 0x25, 0x60, 0x29, 0x82, 0x1b, 0xdd, + 0x87, 0x84, 0xde, 0x6d, 0x50, 0x77, 0x7c, 0x75, 0x04, 0xf9, 0x79, 0x71, 0xaf, 0xb8, 0x13, 0x13, + 0xad, 0x59, 0xfc, 0x1f, 0xe2, 0x90, 0x10, 0xf7, 0x8a, 0xe8, 0x5d, 0x06, 0xa9, 0xbe, 0x35, 0xa2, + 0x14, 0x2f, 0x50, 0xfd, 0x1f, 0x2e, 0x08, 0xa9, 0xce, 0xc1, 0x7c, 0x51, 0x2c, 0x15, 0xea, 0x25, + 0x69, 0xab, 0x54, 0x29, 0xd5, 0x4b, 0x12, 0x41, 0xd2, 0xb3, 0x1c, 0x5a, 0x86, 0xdc, 0xde, 0xfe, + 0x66, 0xa5, 0x5c, 0xdb, 0x91, 0xf6, 0xab, 0xce, 0x7f, 0x74, 0x34, 0x8e, 0xb2, 0x30, 0x5d, 0x29, + 0xd7, 0xea, 0x94, 0x50, 0xcb, 0x26, 0x2c, 0xca, 0x76, 0xa9, 0x2e, 0x15, 0x0b, 0x7b, 0x85, 0x62, + 0xb9, 0xfe, 0x24, 0x9b, 0x44, 0x3c, 0x2c, 0xb2, 0xb2, 0x6b, 0xd5, 0xc2, 0x5e, 
0x6d, 0x67, 0xb7, + 0x9e, 0x4d, 0x21, 0x04, 0xb3, 0xf6, 0x7c, 0x87, 0x54, 0xcb, 0xa6, 0x2d, 0x09, 0xc5, 0xca, 0x6e, + 0xd5, 0xd5, 0x61, 0x02, 0xcd, 0x43, 0xd6, 0x59, 0x59, 0x2c, 0x15, 0xb6, 0x6c, 0x14, 0x25, 0x83, + 0xce, 0xc3, 0x4c, 0xe9, 0x83, 0xbd, 0x42, 0x75, 0xcb, 0x61, 0x9c, 0x44, 0xab, 0xb0, 0xec, 0x55, + 0x47, 0xa2, 0xb3, 0x4a, 0x5b, 0x36, 0x96, 0x52, 0xcb, 0x82, 0x8b, 0xd2, 0x7d, 0x19, 0x87, 0x05, + 0x02, 0xd3, 0x39, 0xa0, 0xa0, 0x13, 0xb8, 0x6b, 0x90, 0x25, 0xc0, 0x82, 0xe4, 0x2f, 0xed, 0x66, + 0x09, 0xfd, 0xc0, 0x29, 0xf0, 0x1c, 0x48, 0x3d, 0xee, 0x81, 0xd4, 0xcb, 0xfe, 0x72, 0xf7, 0x26, + 0x0b, 0x3e, 0xfb, 0x56, 0x8b, 0xea, 0xa0, 0x1e, 0x05, 0xd4, 0x63, 0xb7, 0xa3, 0xa5, 0x45, 0xdd, + 0x55, 0x67, 0x69, 0x97, 0xce, 0x18, 0xf2, 0x0f, 0x60, 0xd1, 0xaf, 0x2f, 0x8d, 0xbe, 0x5b, 0x03, + 0x10, 0xb1, 0x9b, 0x83, 0x5c, 0x5e, 0x97, 0x43, 0xf8, 0x0b, 0x07, 0x19, 0x87, 0x6c, 0xdd, 0x23, + 0x86, 0xf2, 0x5d, 0xcc, 0x40, 0x52, 0x93, 0x16, 0xc5, 0x45, 0xb8, 0xbc, 0xe0, 0x6e, 0xdc, 0x0f, + 0xee, 0x06, 0x9e, 0x73, 0x22, 0xf0, 0x9c, 0xbf, 0x09, 0x33, 0x0d, 0x4b, 0x7d, 0x45, 0x53, 0x25, + 0x53, 0xe9, 0x38, 0x88, 0xd3, 0xe0, 0x63, 0x4c, 0xdd, 0x79, 0x41, 0x15, 0xa7, 0x9d, 0x09, 0x16, + 0x09, 0xad, 0xc2, 0xb4, 0xfd, 0x38, 0x23, 0x99, 0x9a, 0xd4, 0x33, 0x70, 0x2e, 0x65, 0xf7, 0xdf, + 0x60, 0xd3, 0xea, 0xda, 0xbe, 0x81, 0x85, 0xdf, 0x71, 0xb0, 0x40, 0x60, 0x05, 0xbf, 0x3b, 0x0e, + 0x03, 0xa9, 0xbd, 0x1e, 0xe7, 0xbb, 0x1a, 0x02, 0x05, 0xbe, 0xac, 0xae, 0x2a, 0x07, 0x8b, 0xfe, + 0xf5, 0x68, 0x2b, 0xf5, 0x79, 0x1c, 0xe6, 0xad, 0xdb, 0xd4, 0x19, 0x78, 0xd1, 0x65, 0xca, 0x18, + 0x27, 0xe9, 0x33, 0x66, 0x72, 0xc0, 0x98, 0x3b, 0xfe, 0x46, 0xe5, 0x86, 0xb7, 0x1e, 0xf0, 0xef, + 0xe0, 0x65, 0xd9, 0xf2, 0xd7, 0x1c, 0x2c, 0xf8, 0xd6, 0xa3, 0xf1, 0xf2, 0x8e, 0xbf, 0xf2, 0xba, + 0x1a, 0xa2, 0xdf, 0x73, 0xd5, 0x5e, 0x77, 0x9d, 0xea, 0x69, 0xbc, 0xb0, 0xfc, 0x73, 0x1c, 0x56, + 0xfa, 0x37, 0x90, 0xfd, 0x3c, 0xda, 0x1c, 0x03, 0x3a, 0x38, 0xdb, 0x2b, 0xe4, 0xfb, 0xfe, 0x84, + 0xbb, 0x3e, 0x78, 0x29, 0x06, 0xa8, 0x14, 0x95, 0x78, 0x03, 0x11, 0xb7, 0xe4, 0xb8, 0x88, 0xdb, + 0x99, 0x3c, 0xe0, 0x7b, 0x5e, 0x30, 0x91, 0x55, 0x9f, 0x7a, 0xc2, 0x88, 0xa8, 0xfc, 0x5b, 0x70, + 0xc1, 0x2e, 0x9a, 0xdd, 0xd7, 0x7d, 0xe7, 0xcd, 0x91, 0xa4, 0xc4, 0x8c, 0xb8, 0x60, 0x0d, 0xbb, + 0x4f, 0xda, 0x14, 0x89, 0x6e, 0x0a, 0x5f, 0x25, 0x61, 0xd1, 0x2a, 0xaa, 0x6b, 0xa6, 0xdc, 0x1a, + 0x07, 0xa3, 0xfd, 0xd6, 0x20, 0xe4, 0x15, 0x67, 0x8f, 0x25, 0x58, 0xea, 0x28, 0x48, 0x17, 0xca, + 0xc3, 0x9c, 0x61, 0xca, 0x2d, 0x3b, 0x1d, 0xc8, 0x7a, 0x0b, 0x9b, 0x52, 0x57, 0x36, 0x9f, 0xd1, + 0x58, 0x3f, 0x4f, 0x87, 0xea, 0xf6, 0xc8, 0x9e, 0x6c, 0x3e, 0x7b, 0x41, 0x07, 0x89, 0xde, 0xf3, + 0x27, 0x85, 0xd7, 0x86, 0xec, 0x25, 0xc2, 0xb7, 0x3e, 0x08, 0x81, 0x45, 0x5f, 0x1f, 0x22, 0x72, + 0x38, 0x1c, 0x7a, 0x76, 0x18, 0xf0, 0x6b, 0x46, 0x54, 0x2f, 0xc2, 0x85, 0x81, 0xcd, 0xd3, 0x2b, + 0xa4, 0x05, 0x39, 0x6b, 0x68, 0x5f, 0x35, 0xc6, 0x74, 0xc7, 0x10, 0x8f, 0x89, 0x87, 0x78, 0x8c, + 0xb0, 0x04, 0x17, 0x03, 0x16, 0xa2, 0x5a, 0xfc, 0x26, 0x45, 0xd4, 0x18, 0x1f, 0xdc, 0xff, 0x30, + 0x2c, 0x2a, 0xde, 0xf4, 0x1e, 0x7b, 0x20, 0x0e, 0xfe, 0x32, 0xe2, 0xe2, 0x32, 0x4c, 0x79, 0xf9, + 0xe8, 0x35, 0x68, 0x0e, 0x09, 0x9c, 0xd4, 0x99, 0xde, 0x1c, 0xd2, 0xbe, 0x37, 0x87, 0x4a, 0x3f, + 0xa8, 0x26, 0xd8, 0xd2, 0x36, 0xd4, 0x14, 0x11, 0x61, 0xf5, 0x74, 0x20, 0xac, 0x32, 0xec, 0x43, + 0x46, 0xa8, 0xd0, 0xff, 0x81, 0xc0, 0xa2, 0x4e, 0x1d, 0xf8, 0xc2, 0x20, 0x3c, 0x05, 0x9e, 0x78, + 0xfc, 0xf8, 0x98, 0xbf, 0xcf, 0x8d, 0xe2, 0x7e, 0x37, 0x12, 0x56, 0x60, 0x29, 0x50, 0x36, 0x5d, + 0xfa, 
0x87, 0x1c, 0x51, 0x6c, 0x1b, 0x9b, 0x7d, 0x64, 0xc5, 0x18, 0x75, 0x69, 0x3a, 0xe8, 0x5d, + 0x9a, 0x90, 0x6c, 0x0f, 0x1e, 0x33, 0x24, 0x84, 0x6d, 0x62, 0x06, 0xbf, 0x2a, 0xf4, 0xb2, 0xbd, + 0x01, 0xa9, 0x9e, 0x0d, 0x97, 0x92, 0xa2, 0x6b, 0x8e, 0x8d, 0x81, 0x7d, 0x6b, 0x48, 0x24, 0x1c, + 0xc2, 0xe7, 0x1c, 0x4c, 0x79, 0xc8, 0x68, 0x19, 0x26, 0x5d, 0xf4, 0xc2, 0xe9, 0x52, 0x5c, 0x82, + 0x75, 0x68, 0xa6, 0x66, 0xca, 0x6d, 0xfa, 0x02, 0x4f, 0x7e, 0x58, 0x8d, 0x65, 0xcf, 0xc0, 0xa4, + 0x88, 0x4d, 0x88, 0xf6, 0xff, 0xe8, 0x16, 0x24, 0x7b, 0xaa, 0x62, 0xda, 0xc1, 0x3a, 0xeb, 0x8f, + 0x42, 0x7b, 0xa9, 0xfc, 0xbe, 0xaa, 0x98, 0xa2, 0xcd, 0x25, 0xdc, 0x84, 0xa4, 0xf5, 0x8b, 0x6d, + 0xf2, 0x27, 0x21, 0xb5, 0xf9, 0xa4, 0x5e, 0xaa, 0x65, 0x39, 0x04, 0x90, 0x2e, 0x93, 0x96, 0x38, + 0x2e, 0x2c, 0xbb, 0x5b, 0x0f, 0x02, 0x51, 0x3e, 0x22, 0x67, 0x18, 0x06, 0x9f, 0x14, 0x02, 0xe1, + 0x93, 0x15, 0xe6, 0x36, 0x1b, 0x02, 0x9c, 0xfc, 0x8b, 0x83, 0x85, 0x40, 0x3e, 0x74, 0xd7, 0x0b, + 0x99, 0x5c, 0x89, 0x94, 0xe9, 0x05, 0x4b, 0x7e, 0xc6, 0x11, 0xb0, 0xe4, 0x1e, 0x03, 0x96, 0x5c, + 0x1f, 0x3a, 0xdf, 0x0b, 0x93, 0x1c, 0x84, 0xa0, 0x24, 0xb5, 0x7a, 0x61, 0xbb, 0x24, 0xed, 0x57, + 0xc9, 0x5f, 0x17, 0x25, 0x99, 0x87, 0xec, 0x76, 0xc9, 0xc1, 0x1d, 0xa4, 0x5a, 0xbd, 0x50, 0xaf, + 0x65, 0xe3, 0x83, 0x08, 0x45, 0xc2, 0xc5, 0x1f, 0xe6, 0x01, 0x51, 0xb3, 0x7a, 0x3f, 0x9d, 0xfd, + 0x94, 0x83, 0x39, 0x86, 0x4c, 0xad, 0xec, 0x79, 0x5d, 0xe3, 0x98, 0xd7, 0xb5, 0x3b, 0x30, 0x6f, + 0xb5, 0x50, 0xc4, 0xf1, 0x0d, 0xa9, 0x8b, 0x75, 0x1b, 0x23, 0xa5, 0xee, 0x74, 0xbe, 0x23, 0x9f, + 0x50, 0x0c, 0x73, 0x0f, 0xeb, 0x96, 0xe0, 0x17, 0x80, 0xff, 0x09, 0x3f, 0x8e, 0x93, 0x8b, 0x7a, + 0xec, 0x42, 0x7f, 0x68, 0xd0, 0x0e, 0x76, 0x02, 0x89, 0x31, 0x3a, 0x81, 0x90, 0x90, 0x4f, 0x8e, + 0x55, 0x1d, 0x8e, 0x7d, 0xc9, 0x09, 0x05, 0x52, 0x14, 0x9c, 0xa1, 0x48, 0x5f, 0xff, 0x37, 0x07, + 0x99, 0x72, 0x13, 0xab, 0xa6, 0xe5, 0xf4, 0x55, 0x98, 0x61, 0xbe, 0x68, 0x46, 0xcb, 0x21, 0x1f, + 0x3a, 0xdb, 0x16, 0xe7, 0x57, 0x22, 0x3f, 0x83, 0x16, 0x62, 0xe8, 0xc8, 0xf3, 0x35, 0x36, 0x03, + 0x2b, 0xbf, 0x32, 0x30, 0x33, 0x20, 0xfe, 0xf9, 0x6b, 0x43, 0xb8, 0xdc, 0x75, 0xde, 0x82, 0x94, + 0xfd, 0xed, 0x2a, 0x9a, 0x77, 0xbf, 0x9f, 0xf5, 0x7c, 0xda, 0xca, 0x2f, 0xf8, 0xa8, 0xce, 0xbc, + 0xf5, 0xbf, 0x67, 0x00, 0xfa, 0xbd, 0x0e, 0x7a, 0x08, 0xd3, 0xde, 0xcf, 0xe7, 0xd0, 0x52, 0xc4, + 0xc7, 0x9b, 0xfc, 0x72, 0xf0, 0xa0, 0xab, 0xd3, 0x43, 0x98, 0xf6, 0x7e, 0xac, 0xd1, 0x17, 0x16, + 0xf0, 0xc1, 0x48, 0x5f, 0x58, 0xe0, 0xf7, 0x1d, 0x31, 0xd4, 0x86, 0x0b, 0x21, 0xcf, 0xf5, 0xe8, + 0xfa, 0x68, 0x1f, 0x35, 0xf0, 0xaf, 0x8e, 0xf8, 0xee, 0x2f, 0xc4, 0x90, 0x0e, 0x17, 0x43, 0x5f, + 0xa9, 0xd1, 0xda, 0xa8, 0x6f, 0xe8, 0xfc, 0x8d, 0x11, 0x38, 0xdd, 0x35, 0x7b, 0xc0, 0x87, 0x3f, + 0x8d, 0xa1, 0x1b, 0x23, 0xbf, 0xd9, 0xf2, 0x37, 0x47, 0x7f, 0x69, 0x13, 0x62, 0x68, 0x07, 0xa6, + 0x3c, 0xaf, 0x2d, 0x88, 0x0f, 0x7c, 0x82, 0x21, 0x82, 0x97, 0x22, 0x9e, 0x67, 0x88, 0x24, 0xcf, + 0xdb, 0x42, 0x5f, 0xd2, 0xe0, 0x23, 0x49, 0x5f, 0x52, 0xc0, 0x63, 0x84, 0xdf, 0xfc, 0xbe, 0xcb, + 0x2f, 0xc8, 0xfc, 0xc1, 0xb7, 0x67, 0x90, 0xf9, 0x43, 0x6e, 0x52, 0x21, 0x86, 0xde, 0x87, 0x59, + 0x16, 0x26, 0x45, 0x2b, 0x91, 0x70, 0x2f, 0x7f, 0x29, 0x6c, 0xd8, 0x2b, 0x92, 0x45, 0xe5, 0xfa, + 0x22, 0x03, 0xd1, 0xc1, 0xbe, 0xc8, 0x10, 0x30, 0x2f, 0x66, 0xe5, 0x27, 0x06, 0x6b, 0xea, 0xe7, + 0xa7, 0x20, 0x88, 0xac, 0x9f, 0x9f, 0x02, 0x01, 0x2a, 0x21, 0x86, 0x14, 0x58, 0x0c, 0x86, 0x3a, + 0xd0, 0xb5, 0x91, 0x90, 0x1c, 0xfe, 0xfa, 0x30, 0x36, 0x37, 0xd5, 0xfc, 0x2d, 0x05, 0x49, 0xfb, + 0x16, 0xac, 0xc3, 0x39, 0x5f, 
0xab, 0x89, 0x2e, 0x45, 0x37, 0xe0, 0xfc, 0xe5, 0xd0, 0x71, 0x77, + 0x27, 0x4f, 0xe1, 0xfc, 0x40, 0xf3, 0x88, 0x56, 0xbd, 0xf3, 0x82, 0x1a, 0x58, 0xfe, 0x4a, 0x04, + 0x87, 0x5f, 0x36, 0x9b, 0x76, 0x56, 0x87, 0x75, 0x37, 0xac, 0xec, 0xb0, 0x54, 0xf3, 0x11, 0x29, + 0x3a, 0xfc, 0x49, 0x46, 0x60, 0xf5, 0x0a, 0x4c, 0x2f, 0x57, 0x23, 0x79, 0xdc, 0x15, 0x3e, 0x74, + 0xab, 0x1d, 0x4f, 0x75, 0x8d, 0x18, 0xe5, 0x02, 0x9b, 0x00, 0x5e, 0x88, 0x62, 0x71, 0xc5, 0x3f, + 0x86, 0xac, 0xff, 0x0a, 0x46, 0xcc, 0x79, 0x05, 0xb9, 0xcd, 0x6a, 0x38, 0x83, 0xdf, 0x32, 0xfe, + 0xf8, 0xf7, 0x6b, 0x15, 0x14, 0xf9, 0x57, 0x23, 0x79, 0xbc, 0x19, 0xcb, 0x53, 0xf0, 0xf5, 0x33, + 0xd6, 0x60, 0x71, 0xd8, 0xcf, 0x58, 0x01, 0x15, 0xa2, 0x10, 0xbb, 0xf7, 0x0e, 0x40, 0xc3, 0x50, + 0x24, 0xd2, 0x11, 0xa3, 0x95, 0x81, 0xc7, 0x89, 0x07, 0x0a, 0x6e, 0x37, 0x77, 0xbb, 0xa6, 0xa2, + 0xa9, 0x46, 0xee, 0x17, 0x19, 0xbb, 0x1d, 0x9f, 0x6c, 0x18, 0x0a, 0x69, 0x4c, 0x37, 0x53, 0x4f, + 0x13, 0x0d, 0x43, 0x39, 0x4c, 0xdb, 0xfc, 0x6f, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, + 0xfe, 0x46, 0x7f, 0x36, 0x00, 0x00, +} + // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -4698,6 +4842,20 @@ type IdentityServer interface { Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) } +// UnimplementedIdentityServer can be embedded to have forward compatible implementations. +type UnimplementedIdentityServer struct { +} + +func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented") +} +func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented") +} +func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented") +} + func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { s.RegisterService(&_Identity_serviceDesc, srv) } @@ -4927,6 +5085,47 @@ type ControllerServer interface { ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) } +// UnimplementedControllerServer can be embedded to have forward compatible implementations. 
+type UnimplementedControllerServer struct { +} + +func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented") +} +func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented") +} +func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented") +} +func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented") +} +func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented") +} +func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented") +} +func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented") +} + func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { s.RegisterService(&_Controller_serviceDesc, srv) } @@ -5310,6 +5509,35 @@ type NodeServer interface { NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) } +// UnimplementedNodeServer can be embedded to have forward compatible implementations. 
+type UnimplementedNodeServer struct { +} + +func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented") +} +func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented") +} +func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented") +} +func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented") +} + func RegisterNodeServer(s *grpc.Server, srv NodeServer) { s.RegisterService(&_Node_serviceDesc, srv) } @@ -5498,216 +5726,3 @@ var _Node_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "github.com/container-storage-interface/spec/csi.proto", } - -func init() { - proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_csi_2c5455657a82ae49) -} - -var fileDescriptor_csi_2c5455657a82ae49 = []byte{ - // 3276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x70, 0xdb, 0xc6, - 0x15, 0x26, 0xf8, 0x23, 0x51, 0x4f, 0x3f, 0xa6, 0x57, 0x3f, 0xa6, 0x21, 0xc9, 0x96, 0xe1, 0xd8, - 0x51, 0x1c, 0x9b, 0x6a, 0x94, 0x38, 0xd3, 0xd8, 0x4e, 0x1b, 0x8a, 0xa2, 0x25, 0xc6, 0x34, 0xa9, - 0x80, 0x94, 0x1c, 0xbb, 0xcd, 0x20, 0x10, 0xb9, 0xa2, 0x31, 0x21, 0x01, 0x06, 0x00, 0x55, 0xa9, - 0x97, 0xce, 0xb4, 0xa7, 0x4c, 0xcf, 0x9d, 0xb6, 0xa7, 0xce, 0xa4, 0xbd, 0xb4, 0xd3, 0x4c, 0x4f, - 0x9d, 0x1e, 0x3b, 0xd3, 0x63, 0x0f, 0xbd, 0xb6, 0x93, 0x4b, 0xae, 0x9d, 0x4c, 0x3b, 0x93, 0xe9, - 0xb1, 0xa7, 0x0e, 0xb0, 0x0b, 0x10, 0x0b, 0x02, 0x20, 0x69, 0xd9, 0xe3, 0x43, 0x4f, 0x24, 0xde, - 0xbe, 0x7d, 0xfb, 0xf6, 0xe1, 0xbd, 0xb7, 0xef, 0x7d, 0x0b, 0xb8, 0xdd, 0x52, 0xcc, 0xa7, 0xbd, - 0xc3, 0x5c, 0x43, 0xeb, 0x6c, 0x34, 0x34, 0xd5, 0x94, 0x15, 0x15, 0xeb, 0xb7, 0x0c, 0x53, 0xd3, - 0xe5, 0x16, 0xbe, 0xa5, 0xa8, 0x26, 0xd6, 0x8f, 0xe4, 0x06, 0xde, 0x30, 0xba, 0xb8, 0xb1, 0xd1, - 0x30, 0x94, 0x5c, 0x57, 0xd7, 0x4c, 0x0d, 0x4d, 0x58, 0x7f, 0x8f, 0xdf, 0xe0, 0xd7, 0x5a, 0x9a, - 0xd6, 0x6a, 0xe3, 0x0d, 0x9b, 0x7a, 0xd8, 
0x3b, 0xda, 0x68, 0x62, 0xa3, 0xa1, 0x2b, 0x5d, 0x53, - 0xd3, 0x09, 0x27, 0x7f, 0xd9, 0xcf, 0x61, 0x2a, 0x1d, 0x6c, 0x98, 0x72, 0xa7, 0x4b, 0x19, 0x2e, - 0xf9, 0x19, 0x7e, 0xa0, 0xcb, 0xdd, 0x2e, 0xd6, 0x0d, 0x32, 0x2e, 0x2c, 0xc1, 0xc2, 0x0e, 0x36, - 0xf7, 0xda, 0xbd, 0x96, 0xa2, 0x96, 0xd4, 0x23, 0x4d, 0xc4, 0x9f, 0xf6, 0xb0, 0x61, 0x0a, 0xff, - 0xe0, 0x60, 0xd1, 0x37, 0x60, 0x74, 0x35, 0xd5, 0xc0, 0x08, 0x41, 0x52, 0x95, 0x3b, 0x38, 0xcb, - 0xad, 0x71, 0xeb, 0x53, 0xa2, 0xfd, 0x1f, 0x5d, 0x83, 0xb9, 0x63, 0xac, 0x36, 0x35, 0x5d, 0x3a, - 0xc6, 0xba, 0xa1, 0x68, 0x6a, 0x36, 0x6e, 0x8f, 0xce, 0x12, 0xea, 0x01, 0x21, 0xa2, 0x1d, 0x48, - 0x77, 0x64, 0x55, 0x39, 0xc2, 0x86, 0x99, 0x4d, 0xac, 0x25, 0xd6, 0xa7, 0x37, 0x5f, 0xcf, 0x91, - 0xad, 0xe6, 0x02, 0xd7, 0xca, 0x3d, 0xa4, 0xdc, 0x45, 0xd5, 0xd4, 0x4f, 0x45, 0x77, 0x32, 0x7f, - 0x17, 0x66, 0x99, 0x21, 0x94, 0x81, 0xc4, 0x27, 0xf8, 0x94, 0xea, 0x64, 0xfd, 0x45, 0x0b, 0x90, - 0x3a, 0x96, 0xdb, 0x3d, 0x4c, 0x35, 0x21, 0x0f, 0x77, 0xe2, 0xdf, 0xe6, 0x84, 0x4b, 0xb0, 0xe2, - 0xae, 0x56, 0x90, 0xbb, 0xf2, 0xa1, 0xd2, 0x56, 0x4c, 0x05, 0x1b, 0xce, 0xd6, 0x3f, 0x82, 0xd5, - 0x90, 0x71, 0x6a, 0x81, 0x7b, 0x30, 0xd3, 0xf0, 0xd0, 0xb3, 0x9c, 0xbd, 0x95, 0xac, 0xb3, 0x15, - 0xdf, 0xcc, 0x53, 0x91, 0xe1, 0x16, 0xfe, 0x96, 0x80, 0x8c, 0x9f, 0x05, 0xdd, 0x83, 0x49, 0x03, - 0xeb, 0xc7, 0x4a, 0x83, 0xd8, 0x75, 0x7a, 0x73, 0x2d, 0x4c, 0x5a, 0xae, 0x46, 0xf8, 0x76, 0x63, - 0xa2, 0x33, 0x05, 0xed, 0x43, 0xe6, 0x58, 0x6b, 0xf7, 0x3a, 0x58, 0xc2, 0x27, 0x5d, 0x59, 0x75, - 0x5f, 0xc0, 0xf4, 0xe6, 0x7a, 0xa8, 0x98, 0x03, 0x7b, 0x42, 0xd1, 0xe1, 0xdf, 0x8d, 0x89, 0xe7, - 0x8e, 0x59, 0x12, 0xff, 0x73, 0x0e, 0x26, 0xe9, 0x6a, 0xe8, 0x1d, 0x48, 0x9a, 0xa7, 0x5d, 0xa2, - 0xdd, 0xdc, 0xe6, 0xb5, 0x61, 0xda, 0xe5, 0xea, 0xa7, 0x5d, 0x2c, 0xda, 0x53, 0x84, 0x0f, 0x20, - 0x69, 0x3d, 0xa1, 0x69, 0x98, 0xdc, 0xaf, 0x3c, 0xa8, 0x54, 0x1f, 0x55, 0x32, 0x31, 0xb4, 0x04, - 0xa8, 0x50, 0xad, 0xd4, 0xc5, 0x6a, 0xb9, 0x5c, 0x14, 0xa5, 0x5a, 0x51, 0x3c, 0x28, 0x15, 0x8a, - 0x19, 0x0e, 0xbd, 0x02, 0x6b, 0x07, 0xd5, 0xf2, 0xfe, 0xc3, 0xa2, 0x94, 0x2f, 0x14, 0x8a, 0xb5, - 0x5a, 0x69, 0xab, 0x54, 0x2e, 0xd5, 0x1f, 0x4b, 0x85, 0x6a, 0xa5, 0x56, 0x17, 0xf3, 0xa5, 0x4a, - 0xbd, 0x96, 0x89, 0xf3, 0x3f, 0xe6, 0xe0, 0x9c, 0x6f, 0x03, 0x28, 0xcf, 0x68, 0x78, 0x6b, 0xd4, - 0x8d, 0x7b, 0x35, 0xbd, 0x19, 0xa4, 0x29, 0xc0, 0x44, 0xb5, 0x52, 0x2e, 0x55, 0x2c, 0xed, 0xa6, - 0x61, 0xb2, 0x7a, 0xff, 0xbe, 0xfd, 0x10, 0xdf, 0x9a, 0x20, 0x0b, 0x0a, 0x73, 0x30, 0xb3, 0xa7, - 0x6b, 0x87, 0xd8, 0xf1, 0x9f, 0x3c, 0xcc, 0xd2, 0x67, 0xea, 0x2f, 0xdf, 0x82, 0x94, 0x8e, 0xe5, - 0xe6, 0x29, 0x7d, 0xb5, 0x7c, 0x8e, 0xc4, 0x64, 0xce, 0x89, 0xc9, 0xdc, 0x96, 0xa6, 0xb5, 0x0f, - 0x2c, 0xff, 0x14, 0x09, 0xa3, 0xf0, 0x4d, 0x12, 0xe6, 0x0b, 0x3a, 0x96, 0x4d, 0x4c, 0xb4, 0xa5, - 0xa2, 0x03, 0x63, 0xef, 0x1e, 0xcc, 0x59, 0xfe, 0xd5, 0x50, 0xcc, 0x53, 0x49, 0x97, 0xd5, 0x16, - 0xa6, 0xaf, 0x7e, 0xd1, 0xb1, 0x40, 0x81, 0x8e, 0x8a, 0xd6, 0xa0, 0x38, 0xdb, 0xf0, 0x3e, 0xa2, - 0x12, 0xcc, 0x53, 0xd7, 0x61, 0x5c, 0x3a, 0xc1, 0xba, 0x34, 0xd1, 0xc2, 0xe3, 0xd2, 0xe8, 0x98, - 0xa5, 0x28, 0xd8, 0x40, 0x0f, 0x00, 0xba, 0xb2, 0x2e, 0x77, 0xb0, 0x89, 0x75, 0x23, 0x9b, 0x64, - 0xe3, 0x3b, 0x60, 0x37, 0xb9, 0x3d, 0x97, 0x9b, 0xc4, 0xb7, 0x67, 0x3a, 0xda, 0xb1, 0x02, 0xa2, - 0xa1, 0x63, 0xd3, 0xc8, 0xa6, 0x6c, 0x49, 0xeb, 0x51, 0x92, 0x6a, 0x84, 0xd5, 0x16, 0xb3, 0x95, - 0xf8, 0xc5, 0x16, 0x27, 0x3a, 0xb3, 0x51, 0x15, 0x16, 0x9d, 0x0d, 0x6a, 0xaa, 0x89, 0x55, 0x53, - 0x32, 0xb4, 0x9e, 0xde, 0xc0, 0xd9, 0x09, 0xdb, 0x4a, 0xcb, 0xbe, 
0x2d, 0x12, 0x9e, 0x9a, 0xcd, - 0x22, 0x52, 0xd3, 0x30, 0x44, 0xf4, 0x04, 0x78, 0xb9, 0xd1, 0xc0, 0x86, 0xa1, 0x10, 0x5b, 0x48, - 0x3a, 0xfe, 0xb4, 0xa7, 0xe8, 0xb8, 0x83, 0x55, 0xd3, 0xc8, 0x4e, 0xb2, 0x52, 0xeb, 0x5a, 0x57, - 0x6b, 0x6b, 0xad, 0x53, 0xb1, 0xcf, 0x23, 0x5e, 0x64, 0xa6, 0x7b, 0x46, 0x0c, 0xfe, 0x5d, 0x38, - 0xe7, 0x33, 0xca, 0x38, 0x99, 0x8d, 0xbf, 0x03, 0x33, 0x5e, 0x4b, 0x8c, 0x95, 0x15, 0x7f, 0x1a, - 0x87, 0xf9, 0x00, 0x1b, 0xa0, 0x5d, 0x48, 0x1b, 0xaa, 0xdc, 0x35, 0x9e, 0x6a, 0x26, 0xf5, 0xdf, - 0x1b, 0x11, 0x26, 0xcb, 0xd5, 0x28, 0x2f, 0x79, 0xdc, 0x8d, 0x89, 0xee, 0x6c, 0xb4, 0x05, 0x13, - 0xc4, 0x9e, 0xfe, 0xdc, 0x14, 0x24, 0x87, 0xd0, 0x5c, 0x29, 0x74, 0x26, 0xff, 0x06, 0xcc, 0xb1, - 0x2b, 0xa0, 0xcb, 0x30, 0xed, 0xac, 0x20, 0x29, 0x4d, 0xba, 0x57, 0x70, 0x48, 0xa5, 0x26, 0xff, - 0x3a, 0xcc, 0x78, 0x85, 0xa1, 0x65, 0x98, 0xa2, 0x0e, 0xe1, 0xb2, 0xa7, 0x09, 0xa1, 0xd4, 0x74, - 0x63, 0xfa, 0x3b, 0xb0, 0xc0, 0xfa, 0x19, 0x0d, 0xe5, 0xeb, 0xee, 0x1e, 0x88, 0x2d, 0xe6, 0xd8, - 0x3d, 0x38, 0x7a, 0x0a, 0xbf, 0x4b, 0x42, 0xc6, 0x1f, 0x34, 0xe8, 0x1e, 0xa4, 0x0e, 0xdb, 0x5a, - 0xe3, 0x13, 0x3a, 0xf7, 0x95, 0xb0, 0xe8, 0xca, 0x6d, 0x59, 0x5c, 0x84, 0xba, 0x1b, 0x13, 0xc9, - 0x24, 0x6b, 0x76, 0x47, 0xeb, 0xa9, 0x26, 0xb5, 0x5e, 0xf8, 0xec, 0x87, 0x16, 0x57, 0x7f, 0xb6, - 0x3d, 0x09, 0x6d, 0xc3, 0x34, 0x71, 0x3b, 0xa9, 0xa3, 0x35, 0x71, 0x36, 0x61, 0xcb, 0xb8, 0x1a, - 0x2a, 0x23, 0x6f, 0xf3, 0x3e, 0xd4, 0x9a, 0x58, 0x04, 0xd9, 0xfd, 0xcf, 0xcf, 0xc2, 0xb4, 0x47, - 0x37, 0x7e, 0x07, 0xa6, 0x3d, 0x8b, 0xa1, 0x0b, 0x30, 0x79, 0x64, 0x48, 0x6e, 0x12, 0x9e, 0x12, - 0x27, 0x8e, 0x0c, 0x3b, 0x9f, 0x5e, 0x86, 0x69, 0x5b, 0x0b, 0xe9, 0xa8, 0x2d, 0xb7, 0x8c, 0x6c, - 0x7c, 0x2d, 0x61, 0xbd, 0x23, 0x9b, 0x74, 0xdf, 0xa2, 0xf0, 0xff, 0xe2, 0x00, 0xfa, 0x4b, 0xa2, - 0x7b, 0x90, 0xb4, 0xb5, 0x24, 0xa9, 0x7c, 0x7d, 0x04, 0x2d, 0x73, 0xb6, 0xaa, 0xf6, 0x2c, 0xe1, - 0x57, 0x1c, 0x24, 0x6d, 0x31, 0xfe, 0x03, 0xa7, 0x56, 0xaa, 0xec, 0x94, 0x8b, 0x52, 0xa5, 0xba, - 0x5d, 0x94, 0x1e, 0x89, 0xa5, 0x7a, 0x51, 0xcc, 0x70, 0x68, 0x19, 0x2e, 0x78, 0xe9, 0x62, 0x31, - 0xbf, 0x5d, 0x14, 0xa5, 0x6a, 0xa5, 0xfc, 0x38, 0x13, 0x47, 0x3c, 0x2c, 0x3d, 0xdc, 0x2f, 0xd7, - 0x4b, 0x83, 0x63, 0x09, 0xb4, 0x02, 0x59, 0xcf, 0x18, 0x95, 0x41, 0xc5, 0x26, 0x2d, 0xb1, 0x9e, - 0x51, 0xf2, 0x97, 0x0e, 0xa6, 0xb6, 0x66, 0xdd, 0x97, 0x61, 0x3b, 0xdb, 0x23, 0x98, 0x65, 0x72, - 0xb4, 0x55, 0x4e, 0xd1, 0xa4, 0xd2, 0x94, 0x0e, 0x4f, 0x4d, 0xbb, 0xc4, 0xe0, 0xd6, 0x13, 0xe2, - 0xac, 0x43, 0xdd, 0xb2, 0x88, 0x96, 0x59, 0xdb, 0x4a, 0x47, 0x31, 0x29, 0x4f, 0xdc, 0xe6, 0x01, - 0x9b, 0x64, 0x33, 0x08, 0x5f, 0xc5, 0x61, 0x82, 0xbe, 0x9b, 0x6b, 0x9e, 0x53, 0x82, 0x11, 0xe9, - 0x50, 0x89, 0x48, 0x26, 0x38, 0xe2, 0x6c, 0x70, 0xa0, 0x5d, 0x98, 0xf3, 0xa6, 0xd2, 0x13, 0xa7, - 0x88, 0xbb, 0xc2, 0xbe, 0x20, 0x6f, 0x3c, 0x9f, 0xd0, 0xd2, 0x6d, 0xf6, 0xd8, 0x4b, 0x43, 0x5b, - 0x30, 0xe7, 0xcb, 0xc6, 0xc9, 0xe1, 0xd9, 0x78, 0xb6, 0xc1, 0x24, 0xa6, 0x3c, 0xcc, 0x3b, 0x89, - 0xb4, 0x8d, 0x25, 0x93, 0x26, 0x5a, 0x7a, 0x5a, 0x64, 0x06, 0x12, 0x30, 0xea, 0x33, 0x3b, 0x34, - 0xfe, 0x3d, 0x40, 0x83, 0xba, 0x8e, 0x95, 0x35, 0x7b, 0x30, 0x1f, 0x90, 0xe2, 0x51, 0x0e, 0xa6, - 0xec, 0x57, 0x65, 0x28, 0x26, 0xa6, 0xe5, 0xe1, 0xa0, 0x46, 0x7d, 0x16, 0x8b, 0xbf, 0xab, 0xe3, - 0x23, 0xac, 0xeb, 0xb8, 0x69, 0x87, 0x47, 0x20, 0xbf, 0xcb, 0x22, 0xfc, 0x84, 0x83, 0xb4, 0x43, - 0x47, 0x77, 0x20, 0x6d, 0xe0, 0x16, 0x39, 0x7e, 0xc8, 0x5a, 0x97, 0xfc, 0x73, 0x73, 0x35, 0xca, - 0x40, 0x0b, 0x69, 0x87, 0xdf, 0x2a, 0xa4, 0x99, 0xa1, 0xb1, 0x36, 0xff, 0x27, 0x0e, 0xe6, 
0xb7, - 0x71, 0x1b, 0xfb, 0xab, 0x94, 0xa8, 0x0c, 0xeb, 0x3d, 0xd8, 0xe3, 0xec, 0xc1, 0x1e, 0x20, 0x2a, - 0xe2, 0x60, 0x3f, 0xd3, 0x61, 0xb7, 0x04, 0x0b, 0xec, 0x6a, 0x24, 0xbd, 0x0b, 0xff, 0x4e, 0xc0, - 0x25, 0xcb, 0x17, 0x74, 0xad, 0xdd, 0xc6, 0xfa, 0x5e, 0xef, 0xb0, 0xad, 0x18, 0x4f, 0xc7, 0xd8, - 0xdc, 0x05, 0x98, 0x54, 0xb5, 0xa6, 0x27, 0x78, 0x26, 0xac, 0xc7, 0x52, 0x13, 0x15, 0xe1, 0xbc, - 0xbf, 0xcc, 0x3a, 0xa5, 0x49, 0x38, 0xbc, 0xc8, 0xca, 0x1c, 0xfb, 0x4f, 0x10, 0x1e, 0xd2, 0x56, - 0x81, 0xa8, 0xa9, 0xed, 0x53, 0x3b, 0x62, 0xd2, 0xa2, 0xfb, 0x8c, 0x44, 0x7f, 0xc5, 0xf4, 0xa6, - 0x5b, 0x31, 0x45, 0xee, 0x28, 0xaa, 0x78, 0xfa, 0x78, 0x20, 0xe2, 0x27, 0x6c, 0xd1, 0xef, 0x8c, - 0x28, 0x7a, 0x68, 0x26, 0x38, 0xcb, 0x5b, 0x7c, 0x0e, 0xe1, 0xfb, 0x57, 0x0e, 0x2e, 0x87, 0x6e, - 0x81, 0x1e, 0xf9, 0x4d, 0x38, 0xd7, 0x25, 0x03, 0xae, 0x11, 0x48, 0x94, 0xdd, 0x1d, 0x6a, 0x04, - 0xda, 0xc5, 0x52, 0x2a, 0x63, 0x86, 0xb9, 0x2e, 0x43, 0xe4, 0xf3, 0x30, 0x1f, 0xc0, 0x36, 0xd6, - 0x66, 0xbe, 0xe6, 0x60, 0xad, 0xaf, 0xca, 0xbe, 0xda, 0x7d, 0x7e, 0xee, 0x5b, 0xef, 0xfb, 0x16, - 0x49, 0xf9, 0xb7, 0x07, 0xf7, 0x1e, 0xbc, 0xe0, 0x8b, 0x8a, 0xe0, 0xab, 0x70, 0x25, 0x62, 0x69, - 0x1a, 0xce, 0x5f, 0x25, 0xe1, 0xca, 0x81, 0xdc, 0x56, 0x9a, 0x6e, 0x21, 0x17, 0xd0, 0xef, 0x47, - 0x9b, 0xa4, 0x31, 0x10, 0x01, 0x24, 0x6b, 0xdd, 0x73, 0xa3, 0x76, 0x98, 0xfc, 0x11, 0x8e, 0xc3, - 0xe7, 0xd8, 0x84, 0x3d, 0x0e, 0x68, 0xc2, 0xde, 0x19, 0x5d, 0xd7, 0xa8, 0x96, 0x6c, 0xdf, 0x9f, - 0x60, 0xde, 0x1e, 0x5d, 0x6e, 0x84, 0x17, 0x9c, 0x39, 0x8a, 0x5f, 0x66, 0xd7, 0xf4, 0x97, 0x24, - 0x08, 0x51, 0xbb, 0xa7, 0x39, 0x44, 0x84, 0xa9, 0x86, 0xa6, 0x1e, 0x29, 0x7a, 0x07, 0x37, 0x69, - 0xf5, 0xff, 0xd6, 0x28, 0xc6, 0xa3, 0x09, 0xa4, 0xe0, 0xcc, 0x15, 0xfb, 0x62, 0x50, 0x16, 0x26, - 0x3b, 0xd8, 0x30, 0xe4, 0x96, 0xa3, 0x96, 0xf3, 0xc8, 0x7f, 0x91, 0x80, 0x29, 0x77, 0x0a, 0x52, - 0x07, 0x3c, 0x98, 0xa4, 0xaf, 0x9d, 0x67, 0x51, 0xe0, 0xd9, 0x9d, 0x39, 0xfe, 0x0c, 0xce, 0xdc, - 0x64, 0x9c, 0x99, 0x84, 0xc3, 0xf6, 0x33, 0xa9, 0x1d, 0xe1, 0xd7, 0x2f, 0xdd, 0x01, 0x85, 0xef, - 0x03, 0x2a, 0x2b, 0x06, 0xed, 0xa2, 0xdc, 0xb4, 0x64, 0x35, 0x4d, 0xf2, 0x89, 0x84, 0x55, 0x53, - 0x57, 0x68, 0xb9, 0x9e, 0x12, 0xa1, 0x23, 0x9f, 0x14, 0x09, 0xc5, 0x2a, 0xe9, 0x0d, 0x53, 0xd6, - 0x4d, 0x45, 0x6d, 0x49, 0xa6, 0xf6, 0x09, 0x76, 0x41, 0x57, 0x87, 0x5a, 0xb7, 0x88, 0xc2, 0xe7, - 0x1c, 0xcc, 0x33, 0xe2, 0xa9, 0x4f, 0xde, 0x85, 0xc9, 0xbe, 0x6c, 0xa6, 0x8c, 0x0f, 0xe0, 0xce, - 0x11, 0xb3, 0x39, 0x33, 0xd0, 0x2a, 0x80, 0x8a, 0x4f, 0x4c, 0x66, 0xdd, 0x29, 0x8b, 0x62, 0xaf, - 0xc9, 0x6f, 0x40, 0x8a, 0x98, 0x61, 0xd4, 0x7e, 0xf9, 0x8b, 0x38, 0xa0, 0x1d, 0x6c, 0xba, 0x6d, - 0x10, 0xb5, 0x41, 0x88, 0x2f, 0x71, 0xcf, 0xe0, 0x4b, 0xef, 0x33, 0xbe, 0x44, 0xbc, 0xf1, 0x86, - 0x07, 0x7d, 0xf6, 0x2d, 0x1d, 0x99, 0x09, 0x43, 0x5a, 0x0f, 0x52, 0xcf, 0x8d, 0xd6, 0x7a, 0x9c, - 0xd1, 0x65, 0xb6, 0x61, 0x9e, 0xd1, 0x99, 0xbe, 0xd3, 0x5b, 0x80, 0xe4, 0x63, 0x59, 0x69, 0xcb, - 0x96, 0x5e, 0x4e, 0x67, 0x47, 0x3b, 0xbd, 0xf3, 0xee, 0x88, 0x33, 0x4d, 0x10, 0xbc, 0x05, 0x03, - 0x95, 0xe7, 0x47, 0xc3, 0xdb, 0xde, 0x83, 0x76, 0x80, 0x87, 0xae, 0xbb, 0x13, 0x88, 0x88, 0x5f, - 0x1d, 0x2c, 0x12, 0x28, 0x3c, 0x1c, 0x0a, 0x8e, 0x7f, 0x96, 0x80, 0xe5, 0x08, 0x6e, 0x74, 0x17, - 0x12, 0x7a, 0xb7, 0x41, 0x9d, 0xe9, 0xd5, 0x11, 0xe4, 0xe7, 0xc4, 0xbd, 0xc2, 0x6e, 0x4c, 0xb4, - 0x66, 0xf1, 0xbf, 0x89, 0x43, 0x42, 0xdc, 0x2b, 0xa0, 0xf7, 0x18, 0xa4, 0xf8, 0xe6, 0x88, 0x52, - 0xbc, 0x40, 0xf1, 0x97, 0x5c, 0x10, 0x52, 0x9c, 0x85, 0x85, 0x82, 0x58, 0xcc, 0xd7, 0x8b, 0xd2, - 0x76, 0xb1, 0x5c, 
0xac, 0x17, 0x25, 0x82, 0x64, 0x67, 0x38, 0xb4, 0x02, 0xd9, 0xbd, 0xfd, 0xad, - 0x72, 0xa9, 0xb6, 0x2b, 0xed, 0x57, 0x9c, 0x7f, 0x74, 0x34, 0x8e, 0x32, 0x30, 0x53, 0x2e, 0xd5, - 0xea, 0x94, 0x50, 0xcb, 0x24, 0x2c, 0xca, 0x4e, 0xb1, 0x2e, 0x15, 0xf2, 0x7b, 0xf9, 0x42, 0xa9, - 0xfe, 0x38, 0x93, 0x44, 0x3c, 0x2c, 0xb1, 0xb2, 0x6b, 0x95, 0xfc, 0x5e, 0x6d, 0xb7, 0x5a, 0xcf, - 0xa4, 0x10, 0x82, 0x39, 0x7b, 0xbe, 0x43, 0xaa, 0x65, 0x26, 0x2c, 0x09, 0x85, 0x72, 0xb5, 0xe2, - 0xea, 0x30, 0x89, 0x16, 0x20, 0xe3, 0xac, 0x2c, 0x16, 0xf3, 0xdb, 0x36, 0x8a, 0x91, 0x46, 0xe7, - 0x61, 0xb6, 0xf8, 0xe1, 0x5e, 0xbe, 0xb2, 0xed, 0x30, 0x4e, 0xb9, 0x18, 0xd8, 0xd7, 0x71, 0x58, - 0x24, 0x20, 0x98, 0x03, 0xb9, 0x39, 0x61, 0xb9, 0x0e, 0x19, 0xd2, 0xb6, 0x4b, 0xfe, 0xc2, 0x69, - 0x8e, 0xd0, 0x0f, 0x9c, 0xf2, 0xc9, 0x01, 0xac, 0xe3, 0x1e, 0xc0, 0xba, 0xe4, 0x2f, 0x26, 0x6f, - 0xb0, 0xd0, 0xae, 0x6f, 0xb5, 0xa8, 0xfe, 0xe4, 0x61, 0x40, 0xb5, 0x73, 0x2b, 0x5a, 0x5a, 0xd4, - 0x49, 0x70, 0x96, 0x66, 0xe4, 0x8c, 0x01, 0x7d, 0x1f, 0x96, 0xfc, 0xfa, 0xd2, 0xd8, 0xba, 0x39, - 0x00, 0xc0, 0xba, 0x19, 0xc6, 0xe5, 0x75, 0x39, 0x84, 0xbf, 0x73, 0x90, 0x76, 0xc8, 0x56, 0x96, - 0x36, 0x94, 0x1f, 0x62, 0x06, 0xf0, 0x99, 0xb2, 0x28, 0x2e, 0x7e, 0xe4, 0x85, 0x4e, 0xe3, 0x7e, - 0xe8, 0x34, 0xf0, 0x3d, 0x27, 0x02, 0xdf, 0xf3, 0x77, 0x61, 0xb6, 0x61, 0xa9, 0xaf, 0x68, 0xaa, - 0x64, 0x2a, 0x1d, 0x07, 0xcf, 0x19, 0xbc, 0xea, 0xa8, 0x3b, 0xf7, 0x93, 0xe2, 0x8c, 0x33, 0xc1, - 0x22, 0xa1, 0x35, 0x98, 0xb1, 0xaf, 0x3e, 0x24, 0x53, 0x93, 0x7a, 0x06, 0xce, 0xa6, 0xec, 0xee, - 0x16, 0x6c, 0x5a, 0x5d, 0xdb, 0x37, 0xb0, 0xf0, 0x67, 0x0e, 0x16, 0x49, 0xd3, 0xee, 0x77, 0xc7, - 0x61, 0x10, 0xb0, 0xd7, 0xe3, 0x7c, 0x89, 0x3f, 0x50, 0xe0, 0x8b, 0xea, 0x59, 0xb2, 0xb0, 0xe4, - 0x5f, 0x8f, 0x36, 0x2a, 0xbf, 0xe5, 0x60, 0xc1, 0x3a, 0x75, 0x9d, 0x81, 0xe7, 0x5d, 0x04, 0x8c, - 0xf1, 0x26, 0x7d, 0xc6, 0x4c, 0xfa, 0x8d, 0x29, 0xfc, 0x9e, 0x83, 0x45, 0x9f, 0xae, 0xd4, 0x53, - 0xdf, 0xf5, 0x57, 0x14, 0x57, 0xbd, 0x15, 0xc5, 0x00, 0xff, 0x98, 0x35, 0xc5, 0x6d, 0xa7, 0xa6, - 0x18, 0x2f, 0x20, 0x3e, 0x8b, 0xc3, 0x6a, 0x3f, 0xb3, 0xdb, 0xd7, 0x7e, 0xcd, 0x31, 0x5a, 0xe2, - 0xb3, 0xdd, 0xae, 0x7d, 0xe0, 0x4f, 0x75, 0x9b, 0x83, 0x87, 0x4d, 0x80, 0x4a, 0x2f, 0xca, 0x01, - 0x7f, 0xe4, 0x45, 0xb7, 0xd8, 0x75, 0xe9, 0x2b, 0x1c, 0x11, 0x26, 0x7e, 0x1b, 0x2e, 0xd8, 0x40, - 0x81, 0x7b, 0xdd, 0xec, 0x5c, 0x82, 0x91, 0x2c, 0x92, 0x16, 0x17, 0xad, 0x61, 0xf7, 0x8e, 0x95, - 0x42, 0xa3, 0x4d, 0xe1, 0x9b, 0x24, 0x2c, 0x55, 0xb4, 0x26, 0xae, 0x99, 0x72, 0x6b, 0x1c, 0xd0, - 0xf0, 0x7b, 0x83, 0x18, 0x4c, 0x9c, 0xb5, 0x67, 0xb0, 0xd4, 0x51, 0xa0, 0x17, 0x94, 0x83, 0x79, - 0xc3, 0x94, 0x5b, 0x76, 0x04, 0xc9, 0x7a, 0x0b, 0x9b, 0x52, 0x57, 0x36, 0x9f, 0xd2, 0xf0, 0x38, - 0x4f, 0x87, 0xea, 0xf6, 0xc8, 0x9e, 0x6c, 0x3e, 0x0d, 0xc6, 0xf2, 0x92, 0x63, 0x63, 0x79, 0xef, - 0xfb, 0xdb, 0xe9, 0xd7, 0x87, 0xec, 0x25, 0xe2, 0x1c, 0xfc, 0x30, 0x04, 0xa7, 0x7b, 0x63, 0x88, - 0xc8, 0xe1, 0xf8, 0xdc, 0xd9, 0x71, 0xa9, 0x97, 0x0c, 0xf1, 0x5d, 0x84, 0x0b, 0x03, 0x9b, 0xa7, - 0x59, 0xb7, 0x05, 0x59, 0x6b, 0x68, 0x5f, 0x35, 0xc6, 0x74, 0xc7, 0x10, 0x8f, 0x89, 0x87, 0x78, - 0x8c, 0xb0, 0x0c, 0x17, 0x03, 0x16, 0xa2, 0x5a, 0xfc, 0x31, 0x45, 0xd4, 0x18, 0x1f, 0x6d, 0xfe, - 0x28, 0x2c, 0x2a, 0xde, 0xf2, 0xbe, 0xf6, 0x40, 0x60, 0xf6, 0x45, 0xc4, 0xc5, 0x65, 0x98, 0xf6, - 0xf2, 0xd1, 0x93, 0xc3, 0x1c, 0x12, 0x38, 0xa9, 0x33, 0x81, 0xe0, 0x13, 0x3e, 0x10, 0xbc, 0xdc, - 0x0f, 0xaa, 0x49, 0xb6, 0x1a, 0x0c, 0x35, 0x45, 0x44, 0x58, 0x3d, 0x19, 0x08, 0xab, 0x34, 0x8b, - 0xac, 0x87, 0x0a, 0xfd, 0x3f, 0x08, 0x2c, 
0xea, 0xd4, 0x81, 0x90, 0xb7, 0xf0, 0x04, 0x78, 0xe2, - 0xf1, 0xe3, 0x83, 0xd0, 0x3e, 0x37, 0x8a, 0xfb, 0xdd, 0x48, 0x58, 0x85, 0xe5, 0x40, 0xd9, 0x74, - 0xe9, 0xc7, 0x44, 0xaf, 0x1d, 0x4c, 0x31, 0x8c, 0x9a, 0x29, 0x9b, 0xc6, 0xa8, 0x2b, 0xd3, 0x41, - 0xef, 0xca, 0x84, 0x64, 0xaf, 0xbc, 0x43, 0x76, 0xe5, 0x17, 0x4d, 0xcf, 0xce, 0xd7, 0x20, 0xd5, - 0xb3, 0xe1, 0x38, 0x52, 0xfc, 0xcc, 0xb3, 0x2e, 0xbd, 0x6f, 0x0d, 0x89, 0x84, 0x43, 0xf8, 0x03, - 0x07, 0xd3, 0x1e, 0x32, 0x5a, 0x81, 0x29, 0xb7, 0x3b, 0x77, 0xea, 0x74, 0x97, 0x60, 0xbd, 0x03, - 0x53, 0x33, 0xe5, 0x36, 0xbd, 0xe1, 0x25, 0x0f, 0x56, 0x6b, 0xd5, 0x33, 0x30, 0x29, 0xe3, 0x12, - 0xa2, 0xfd, 0x1f, 0xdd, 0x84, 0x64, 0x4f, 0x55, 0x4c, 0x3b, 0xf6, 0xe6, 0xfc, 0x41, 0x65, 0x2f, - 0x95, 0xdb, 0x57, 0x15, 0x53, 0xb4, 0xb9, 0x84, 0x1b, 0x90, 0xb4, 0x9e, 0xd8, 0x26, 0x76, 0x0a, - 0x52, 0x5b, 0x8f, 0xeb, 0xc5, 0x5a, 0x86, 0x43, 0x00, 0x13, 0xa5, 0x4a, 0x75, 0xbb, 0x58, 0xcb, - 0xc4, 0x85, 0x15, 0x77, 0xeb, 0x41, 0x20, 0xc1, 0xc7, 0xe4, 0x95, 0x84, 0xc1, 0x03, 0xf9, 0x40, - 0x78, 0x60, 0x95, 0x39, 0x9c, 0x86, 0x00, 0x03, 0xff, 0xe1, 0x60, 0x31, 0x90, 0x0f, 0xdd, 0xf6, - 0x42, 0x02, 0x57, 0x22, 0x65, 0x7a, 0xc1, 0x80, 0x5f, 0x72, 0x04, 0x0c, 0xb8, 0xc3, 0x80, 0x01, - 0xd7, 0x87, 0xce, 0xf7, 0xc2, 0x00, 0x07, 0x21, 0x28, 0x40, 0xad, 0x9e, 0xdf, 0x29, 0x4a, 0xfb, - 0x15, 0xf2, 0xeb, 0xa2, 0x00, 0x0b, 0x90, 0xb1, 0xba, 0x7a, 0xfa, 0x7d, 0x5b, 0xad, 0x9e, 0xaf, - 0xd7, 0x32, 0xf1, 0xc1, 0x0e, 0x3c, 0xe1, 0x76, 0xe0, 0x0b, 0x80, 0xa8, 0x59, 0xbd, 0x9f, 0x66, - 0x7e, 0xce, 0xc1, 0x3c, 0x43, 0xa6, 0x56, 0xf6, 0xdc, 0xde, 0x70, 0xcc, 0xed, 0xcd, 0x06, 0x2c, - 0x58, 0x4d, 0x04, 0x71, 0x64, 0x43, 0xea, 0x62, 0x5d, 0xb2, 0x46, 0xa8, 0x3b, 0x9d, 0xef, 0xc8, - 0x27, 0x14, 0xed, 0xdb, 0xc3, 0xba, 0x25, 0xf8, 0x39, 0xe0, 0x5b, 0xc2, 0xcf, 0x38, 0x72, 0xee, - 0x8e, 0x5d, 0x70, 0x0f, 0x0b, 0xc2, 0x80, 0x8a, 0x3c, 0x31, 0x7a, 0x45, 0x2e, 0xe4, 0xc9, 0x61, - 0x7b, 0x86, 0xe2, 0x77, 0xf3, 0xbf, 0x1c, 0xa4, 0x4b, 0x4d, 0xac, 0x9a, 0x96, 0xf7, 0x55, 0x60, - 0x96, 0xf9, 0x74, 0x15, 0xad, 0x84, 0x7c, 0xd1, 0x6a, 0x6f, 0x9d, 0x5f, 0x8d, 0xfc, 0xde, 0x55, - 0x88, 0xa1, 0x23, 0xcf, 0x67, 0xb7, 0x0c, 0x7e, 0xf9, 0xca, 0xc0, 0xcc, 0x80, 0x40, 0xe4, 0xaf, - 0x0d, 0xe1, 0x72, 0xd7, 0x79, 0x1b, 0x52, 0xf6, 0x47, 0x8a, 0x68, 0xc1, 0xfd, 0x50, 0xd2, 0xf3, - 0x0d, 0x23, 0xbf, 0xe8, 0xa3, 0x3a, 0xf3, 0x36, 0xff, 0x99, 0x06, 0xe8, 0xf7, 0x10, 0xe8, 0x01, - 0xcc, 0x78, 0xbf, 0x93, 0x42, 0xcb, 0x11, 0x5f, 0xe9, 0xf1, 0x2b, 0xc1, 0x83, 0xae, 0x4e, 0x0f, - 0x60, 0xc6, 0x7b, 0x2b, 0xdf, 0x17, 0x16, 0xf0, 0x65, 0x40, 0x5f, 0x58, 0xe0, 0x45, 0x7e, 0x0c, - 0xb5, 0xe1, 0x42, 0xc8, 0xbd, 0x2c, 0xba, 0x3e, 0xda, 0xed, 0x35, 0xff, 0xea, 0x88, 0x17, 0xbc, - 0x42, 0x0c, 0xe9, 0x70, 0x31, 0xf4, 0x3a, 0x12, 0xad, 0x8f, 0x7a, 0x59, 0xca, 0xbf, 0x36, 0x02, - 0xa7, 0xbb, 0x66, 0x0f, 0xf8, 0xf0, 0x3b, 0x10, 0xf4, 0xda, 0xc8, 0x97, 0x73, 0xfc, 0x8d, 0xd1, - 0xaf, 0x54, 0x84, 0x18, 0xda, 0x85, 0x69, 0xcf, 0x05, 0x01, 0xe2, 0x03, 0x6f, 0x0d, 0x88, 0xe0, - 0xe5, 0x88, 0x1b, 0x05, 0x22, 0xc9, 0x03, 0x62, 0xf7, 0x25, 0x0d, 0xa2, 0xf1, 0x7d, 0x49, 0x01, - 0xa8, 0xb7, 0xdf, 0xfc, 0xbe, 0x53, 0x28, 0xc8, 0xfc, 0xc1, 0xc7, 0x58, 0x90, 0xf9, 0x43, 0x8e, - 0x34, 0x21, 0x86, 0x3e, 0x80, 0x39, 0x16, 0xb1, 0x43, 0xab, 0x91, 0xc8, 0x23, 0x7f, 0x29, 0x6c, - 0xd8, 0x2b, 0x92, 0x05, 0x88, 0xfa, 0x22, 0x03, 0x81, 0xaa, 0xbe, 0xc8, 0x10, 0x5c, 0x29, 0x66, - 0xe5, 0x27, 0x06, 0x7c, 0xe9, 0xe7, 0xa7, 0x20, 0xbc, 0xa9, 0x9f, 0x9f, 0x02, 0x11, 0x1b, 0x21, - 0x86, 0x14, 0x58, 0x0a, 0x86, 0x10, 0xd0, 0xb5, 0x91, 0xa0, 0x0d, 
0xfe, 0xfa, 0x30, 0x36, 0x37, - 0xd5, 0x7c, 0x99, 0x82, 0xa4, 0x7d, 0x1c, 0xd5, 0xe1, 0x9c, 0xaf, 0x85, 0x43, 0x97, 0xa2, 0x1b, - 0x5b, 0xfe, 0x72, 0xe8, 0xb8, 0xbb, 0x93, 0x27, 0x70, 0x7e, 0xa0, 0x29, 0x43, 0x6b, 0xde, 0x79, - 0x41, 0x8d, 0x21, 0x7f, 0x25, 0x82, 0xc3, 0x2f, 0x9b, 0x4d, 0x3b, 0x6b, 0xc3, 0xba, 0x06, 0x56, - 0x76, 0x58, 0xaa, 0xf9, 0x98, 0x9c, 0xfe, 0xfe, 0x24, 0x23, 0xb0, 0x7a, 0x05, 0xa6, 0x97, 0xab, - 0x91, 0x3c, 0xee, 0x0a, 0x1f, 0xb9, 0x65, 0x87, 0xa7, 0xcc, 0x45, 0x8c, 0x72, 0x81, 0xd5, 0x35, - 0x2f, 0x44, 0xb1, 0xb8, 0xe2, 0x1f, 0x41, 0xc6, 0x7f, 0x04, 0x23, 0xe6, 0x7d, 0x05, 0xb9, 0xcd, - 0x5a, 0x38, 0x83, 0xdf, 0x32, 0xfe, 0xf8, 0xf7, 0x6b, 0x15, 0x14, 0xf9, 0x57, 0x23, 0x79, 0xbc, - 0x19, 0xcb, 0x53, 0x79, 0xf5, 0x33, 0xd6, 0x60, 0x95, 0xd6, 0xcf, 0x58, 0x01, 0xa5, 0x9a, 0x10, - 0xbb, 0xf3, 0x2e, 0x40, 0xc3, 0x50, 0x24, 0xd2, 0x69, 0xa2, 0xd5, 0x01, 0x9c, 0xfc, 0xbe, 0x82, - 0xdb, 0xcd, 0x6a, 0xd7, 0x54, 0x34, 0xd5, 0xc8, 0xfe, 0x3a, 0x6d, 0xb7, 0xb9, 0x53, 0x0d, 0x43, - 0x21, 0x0d, 0xdf, 0x56, 0xea, 0x49, 0xa2, 0x61, 0x28, 0x87, 0x13, 0x36, 0xff, 0x9b, 0xff, 0x0b, - 0x00, 0x00, 0xff, 0xff, 0x95, 0x8e, 0xb8, 0x49, 0x68, 0x34, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/progress.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/progress.go deleted file mode 100644 index ef3787db65d..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/progress.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import "fmt" - -const ( - ProgressStateProbe ProgressStateType = iota - ProgressStateReplicate - ProgressStateSnapshot -) - -type ProgressStateType uint64 - -var prstmap = [...]string{ - "ProgressStateProbe", - "ProgressStateReplicate", - "ProgressStateSnapshot", -} - -func (st ProgressStateType) String() string { return prstmap[uint64(st)] } - -// Progress represents a follower’s progress in the view of the leader. Leader maintains -// progresses of all followers, and sends entries to the follower based on its progress. -type Progress struct { - Match, Next uint64 - // State defines how the leader should interact with the follower. - // - // When in ProgressStateProbe, leader sends at most one replication message - // per heartbeat interval. It also probes actual progress of the follower. - // - // When in ProgressStateReplicate, leader optimistically increases next - // to the latest entry sent after sending replication message. This is - // an optimized state for fast replicating log entries to the follower. - // - // When in ProgressStateSnapshot, leader should have sent out snapshot - // before and stops sending any replication message. - State ProgressStateType - - // Paused is used in ProgressStateProbe. - // When Paused is true, raft should pause sending replication message to this peer. - Paused bool - // PendingSnapshot is used in ProgressStateSnapshot. 
- // If there is a pending snapshot, the pendingSnapshot will be set to the - // index of the snapshot. If pendingSnapshot is set, the replication process of - // this Progress will be paused. raft will not resend snapshot until the pending one - // is reported to be failed. - PendingSnapshot uint64 - - // RecentActive is true if the progress is recently active. Receiving any messages - // from the corresponding follower indicates the progress is active. - // RecentActive can be reset to false after an election timeout. - RecentActive bool - - // inflights is a sliding window for the inflight messages. - // Each inflight message contains one or more log entries. - // The max number of entries per message is defined in raft config as MaxSizePerMsg. - // Thus inflight effectively limits both the number of inflight messages - // and the bandwidth each Progress can use. - // When inflights is full, no more message should be sent. - // When a leader sends out a message, the index of the last - // entry should be added to inflights. The index MUST be added - // into inflights in order. - // When a leader receives a reply, the previous inflights should - // be freed by calling inflights.freeTo with the index of the last - // received entry. - ins *inflights - - // IsLearner is true if this progress is tracked for a learner. - IsLearner bool -} - -func (pr *Progress) resetState(state ProgressStateType) { - pr.Paused = false - pr.PendingSnapshot = 0 - pr.State = state - pr.ins.reset() -} - -func (pr *Progress) becomeProbe() { - // If the original state is ProgressStateSnapshot, progress knows that - // the pending snapshot has been sent to this peer successfully, then - // probes from pendingSnapshot + 1. - if pr.State == ProgressStateSnapshot { - pendingSnapshot := pr.PendingSnapshot - pr.resetState(ProgressStateProbe) - pr.Next = max(pr.Match+1, pendingSnapshot+1) - } else { - pr.resetState(ProgressStateProbe) - pr.Next = pr.Match + 1 - } -} - -func (pr *Progress) becomeReplicate() { - pr.resetState(ProgressStateReplicate) - pr.Next = pr.Match + 1 -} - -func (pr *Progress) becomeSnapshot(snapshoti uint64) { - pr.resetState(ProgressStateSnapshot) - pr.PendingSnapshot = snapshoti -} - -// maybeUpdate returns false if the given n index comes from an outdated message. -// Otherwise it updates the progress and returns true. -func (pr *Progress) maybeUpdate(n uint64) bool { - var updated bool - if pr.Match < n { - pr.Match = n - updated = true - pr.resume() - } - if pr.Next < n+1 { - pr.Next = n + 1 - } - return updated -} - -func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } - -// maybeDecrTo returns false if the given to index comes from an out of order message. -// Otherwise it decreases the progress next index to min(rejected, last) and returns true. -func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { - if pr.State == ProgressStateReplicate { - // the rejection must be stale if the progress has matched and "rejected" - // is smaller than "match". 
- if rejected <= pr.Match { - return false - } - // directly decrease next to match + 1 - pr.Next = pr.Match + 1 - return true - } - - // the rejection must be stale if "rejected" does not match next - 1 - if pr.Next-1 != rejected { - return false - } - - if pr.Next = min(rejected, last+1); pr.Next < 1 { - pr.Next = 1 - } - pr.resume() - return true -} - -func (pr *Progress) pause() { pr.Paused = true } -func (pr *Progress) resume() { pr.Paused = false } - -// IsPaused returns whether sending log entries to this node has been -// paused. A node may be paused because it has rejected recent -// MsgApps, is currently waiting for a snapshot, or has reached the -// MaxInflightMsgs limit. -func (pr *Progress) IsPaused() bool { - switch pr.State { - case ProgressStateProbe: - return pr.Paused - case ProgressStateReplicate: - return pr.ins.full() - case ProgressStateSnapshot: - return true - default: - panic("unexpected state") - } -} - -func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } - -// needSnapshotAbort returns true if snapshot progress's Match -// is equal or higher than the pendingSnapshot. -func (pr *Progress) needSnapshotAbort() bool { - return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot -} - -func (pr *Progress) String() string { - return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) -} - -type inflights struct { - // the starting index in the buffer - start int - // number of inflights in the buffer - count int - - // the size of the buffer - size int - - // buffer contains the index of the last entry - // inside one message. - buffer []uint64 -} - -func newInflights(size int) *inflights { - return &inflights{ - size: size, - } -} - -// add adds an inflight into inflights -func (in *inflights) add(inflight uint64) { - if in.full() { - panic("cannot add into a full inflights") - } - next := in.start + in.count - size := in.size - if next >= size { - next -= size - } - if next >= len(in.buffer) { - in.growBuf() - } - in.buffer[next] = inflight - in.count++ -} - -// grow the inflight buffer by doubling up to inflights.size. We grow on demand -// instead of preallocating to inflights.size to handle systems which have -// thousands of Raft groups per process. -func (in *inflights) growBuf() { - newSize := len(in.buffer) * 2 - if newSize == 0 { - newSize = 1 - } else if newSize > in.size { - newSize = in.size - } - newBuffer := make([]uint64, newSize) - copy(newBuffer, in.buffer) - in.buffer = newBuffer -} - -// freeTo frees the inflights smaller or equal to the given `to` flight. -func (in *inflights) freeTo(to uint64) { - if in.count == 0 || to < in.buffer[in.start] { - // out of the left side of the window - return - } - - idx := in.start - var i int - for i = 0; i < in.count; i++ { - if to < in.buffer[idx] { // found the first large inflight - break - } - - // increase index and maybe rotate - size := in.size - if idx++; idx >= size { - idx -= size - } - } - // free i inflights and set new start index - in.count -= i - in.start = idx - if in.count == 0 { - // inflights is empty, reset the start index so that we don't grow the - // buffer unnecessarily. - in.start = 0 - } -} - -func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } - -// full returns true if the inflights is full. -func (in *inflights) full() bool { - return in.count == in.size -} - -// resets frees all inflights. 
-func (in *inflights) reset() { - in.count = 0 - in.start = 0 -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto deleted file mode 100644 index 644ce7b8f2f..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto +++ /dev/null @@ -1,95 +0,0 @@ -syntax = "proto2"; -package raftpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -enum EntryType { - EntryNormal = 0; - EntryConfChange = 1; -} - -message Entry { - optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations - optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations - optional EntryType Type = 1 [(gogoproto.nullable) = false]; - optional bytes Data = 4; -} - -message SnapshotMetadata { - optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; - optional uint64 index = 2 [(gogoproto.nullable) = false]; - optional uint64 term = 3 [(gogoproto.nullable) = false]; -} - -message Snapshot { - optional bytes data = 1; - optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; -} - -enum MessageType { - MsgHup = 0; - MsgBeat = 1; - MsgProp = 2; - MsgApp = 3; - MsgAppResp = 4; - MsgVote = 5; - MsgVoteResp = 6; - MsgSnap = 7; - MsgHeartbeat = 8; - MsgHeartbeatResp = 9; - MsgUnreachable = 10; - MsgSnapStatus = 11; - MsgCheckQuorum = 12; - MsgTransferLeader = 13; - MsgTimeoutNow = 14; - MsgReadIndex = 15; - MsgReadIndexResp = 16; - MsgPreVote = 17; - MsgPreVoteResp = 18; -} - -message Message { - optional MessageType type = 1 [(gogoproto.nullable) = false]; - optional uint64 to = 2 [(gogoproto.nullable) = false]; - optional uint64 from = 3 [(gogoproto.nullable) = false]; - optional uint64 term = 4 [(gogoproto.nullable) = false]; - optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; - optional uint64 index = 6 [(gogoproto.nullable) = false]; - repeated Entry entries = 7 [(gogoproto.nullable) = false]; - optional uint64 commit = 8 [(gogoproto.nullable) = false]; - optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; - optional bool reject = 10 [(gogoproto.nullable) = false]; - optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; - optional bytes context = 12; -} - -message HardState { - optional uint64 term = 1 [(gogoproto.nullable) = false]; - optional uint64 vote = 2 [(gogoproto.nullable) = false]; - optional uint64 commit = 3 [(gogoproto.nullable) = false]; -} - -message ConfState { - repeated uint64 nodes = 1; - repeated uint64 learners = 2; -} - -enum ConfChangeType { - ConfChangeAddNode = 0; - ConfChangeRemoveNode = 1; - ConfChangeUpdateNode = 2; - ConfChangeAddLearnerNode = 3; -} - -message ConfChange { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; - optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; - optional bytes Context = 4; -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/util.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/util.go deleted file mode 100644 index f4141fe65dd..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/util.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2015 The etcd 
Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "bytes" - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -func (st StateType) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", st.String())), nil -} - -// uint64Slice implements sort interface -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func min(a, b uint64) uint64 { - if a > b { - return b - } - return a -} - -func max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func IsLocalMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || - msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum -} - -func IsResponseMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp -} - -// voteResponseType maps vote and prevote message types to their corresponding responses. -func voteRespMsgType(msgt pb.MessageType) pb.MessageType { - switch msgt { - case pb.MsgVote: - return pb.MsgVoteResp - case pb.MsgPreVote: - return pb.MsgPreVoteResp - default: - panic(fmt.Sprintf("not a vote message: %s", msgt)) - } -} - -// EntryFormatter can be implemented by the application to provide human-readable formatting -// of entry data. Nil is a valid EntryFormatter and will use a default format. -type EntryFormatter func([]byte) string - -// DescribeMessage returns a concise human-readable description of a -// Message for debugging. -func DescribeMessage(m pb.Message, f EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected") - if m.RejectHint != 0 { - fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) - } - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) - } - return buf.String() -} - -// DescribeEntry returns a concise human-readable description of an -// Entry for debugging. 
-func DescribeEntry(e pb.Entry, f EntryFormatter) string { - var formatted string - if e.Type == pb.EntryNormal && f != nil { - formatted = f(e.Data) - } else { - formatted = fmt.Sprintf("%q", e.Data) - } - return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) -} - -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { - if len(ents) == 0 { - return ents - } - size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { - size += ents[limit].Size() - if uint64(size) > maxSize { - break - } - } - return ents[:limit] -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go index 1d54810aff4..f652582e658 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -143,7 +143,7 @@ func NewUserConnection() (*Conn, error) { func NewSystemdConnection() (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { // We skip Hello when talking directly to systemd. - return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { return dbus.Dial("unix:path=/run/systemd/private") }) }) @@ -201,7 +201,7 @@ func (c *Conn) GetManagerProperty(prop string) (string, error) { return variant.String(), nil } -func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { +func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { conn, err := createBus() if err != nil { return nil, err @@ -221,7 +221,7 @@ func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error return conn, nil } -func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { +func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { conn, err := dbusAuthConnection(createBus) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go index 0b4207229f7..5f2790acff9 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -117,13 +117,13 @@ func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) } -// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart // otherwise. func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) } -// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try" // flavored restart otherwise. 
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) @@ -192,7 +192,7 @@ func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { return c.getProperties(path, "org.freedesktop.systemd1.Unit") } -// GetUnitProperties takes the (escaped) unit path and returns all of its dbus object properties. +// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { return c.getProperties(path, "org.freedesktop.systemd1.Unit") } @@ -291,6 +291,8 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { // ListUnits returns an array with all currently loaded units. Note that // units may be known by multiple names at the same time, and hence there might // be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. func (c *Conn) ListUnits() ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go index 70e63a6f16e..f6d7a08a106 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -94,7 +94,7 @@ func (c *Conn) dispatch() { }() } -// Returns two unbuffered channels which will receive all changed units every +// SubscribeUnits returns two unbuffered channels which will receive all changed units every // interval. Deleted units are sent as nil. func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/journal/journal.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/journal/journal.go index ef85a3ba245..a0f4837a02c 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/journal/journal.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -33,7 +33,10 @@ import ( "os" "strconv" "strings" + "sync" + "sync/atomic" "syscall" + "unsafe" ) // Priority of a journal message @@ -50,19 +53,35 @@ const ( PriDebug ) -var conn net.Conn +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } + onceConn.Do(initConn) } -// Enabled returns true if the local systemd journal is available for logging +// Enabled checks whether the local systemd journal is available for logging. 
func Enabled() bool { - return conn != nil + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true } // Send a message to the local systemd journal. vars is a map of journald @@ -73,8 +92,14 @@ func Enabled() bool { // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details. vars may be nil. func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) if conn == nil { - return journalError("could not connect to journald socket") + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", } data := new(bytes.Buffer) @@ -84,32 +109,30 @@ func Send(message string, priority Priority, vars map[string]string) error { appendVariable(data, k, v) } - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - _, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil) - if err != nil { - return journalError(err.Error()) - } - } else if err != nil { - return journalError(err.Error()) + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + return nil } @@ -119,8 +142,8 @@ func Print(priority Priority, format string, a ...interface{}) error { } func appendVariable(w io.Writer, name, value string) { - if !validVarName(name) { - journalError("variable name contains invalid character, ignoring") + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) } if strings.ContainsRune(value, '\n') { /* When the value contains a newline, we write: @@ -137,32 +160,42 @@ func appendVariable(w io.Writer, name, value string) { } } -func validVarName(name string) bool { - /* The variable name must be in uppercase and consist only of characters, - * numbers and underscores, and may not begin with an underscore. (from the docs) - */ +// validVarName validates a variable name to make sure journald will accept it. 
+// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } - valid := name[0] != '_' for _, c := range name { - valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } } - return valid + return nil } +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. func isSocketSpaceError(err error) bool { opErr, ok := err.(*net.OpError) - if !ok { + if !ok || opErr == nil { return false } - sysErr, ok := opErr.Err.(syscall.Errno) - if !ok { + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { return false } - return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS } +// tempFd creates a temporary, unlinked file under `/dev/shm`. func tempFd() (*os.File, error) { file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") if err != nil { @@ -175,8 +208,18 @@ func tempFd() (*os.File, error) { return file, nil } -func journalError(s string) error { - s = "journal error: " + s - fmt.Fprintln(os.Stderr, s) - return errors.New(s) +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util.go deleted file mode 100644 index 7828ce6f049..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package util contains utility functions related to systemd that applications -// can use to check things like whether systemd is running. Note that some of -// these functions attempt to manually load systemd libraries at runtime rather -// than linking against them. -package util - -import ( - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled") -) - -// GetRunningSlice attempts to retrieve the name of the systemd slice in which -// the current process is running. -// This function is a wrapper around the libsystemd C library; if it cannot be -// opened, an error is returned. 
-func GetRunningSlice() (string, error) { - return getRunningSlice() -} - -// RunningFromSystemService tries to detect whether the current process has -// been invoked from a system service. The condition for this is whether the -// process is _not_ a user process. User processes are those running in session -// scopes or under per-user `systemd --user` instances. -// -// To avoid false positives on systems without `pam_systemd` (which is -// responsible for creating user sessions), this function also uses a heuristic -// to detect whether it's being invoked from a session leader process. This is -// the case if the current process is executed directly from a service file -// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the -// command is instead launched in a subshell or similar so that it is not -// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`) -// -// This function is a wrapper around the libsystemd C library; if this is -// unable to successfully open a handle to the library for any reason (e.g. it -// cannot be found), an error will be returned. -func RunningFromSystemService() (bool, error) { - return runningFromSystemService() -} - -// CurrentUnitName attempts to retrieve the name of the systemd system unit -// from which the calling process has been invoked. It wraps the systemd -// `sd_pid_get_unit` call, with the same caveat: for processes not part of a -// systemd system unit, this function will return an error. -func CurrentUnitName() (string, error) { - return currentUnitName() -} - -// IsRunningSystemd checks whether the host was booted with systemd as its init -// system. This functions similarly to systemd's `sd_booted(3)`: internally, it -// checks whether /run/systemd/system/ exists and is a directory. -// http://www.freedesktop.org/software/systemd/man/sd_booted.html -func IsRunningSystemd() bool { - fi, err := os.Lstat("/run/systemd/system") - if err != nil { - return false - } - return fi.IsDir() -} - -// GetMachineID returns a host's 128-bit machine ID as a string. This functions -// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads -// the contents of /etc/machine-id -// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html -func GetMachineID() (string, error) { - machineID, err := ioutil.ReadFile("/etc/machine-id") - if err != nil { - return "", fmt.Errorf("failed to read /etc/machine-id: %v", err) - } - return strings.TrimSpace(string(machineID)), nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go deleted file mode 100644 index 6269bc73236..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build cgo - -package util - -// #include -// #include -// #include -// -// int -// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid) -// { -// int (*sd_pid_get_owner_uid)(pid_t, uid_t *); -// -// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f; -// return sd_pid_get_owner_uid(pid, uid); -// } -// -// int -// my_sd_pid_get_unit(void *f, pid_t pid, char **unit) -// { -// int (*sd_pid_get_unit)(pid_t, char **); -// -// sd_pid_get_unit = (int (*)(pid_t, char **))f; -// return sd_pid_get_unit(pid, unit); -// } -// -// int -// my_sd_pid_get_slice(void *f, pid_t pid, char **slice) -// { -// int (*sd_pid_get_slice)(pid_t, char **); -// -// sd_pid_get_slice = (int (*)(pid_t, char **))f; -// return sd_pid_get_slice(pid, slice); -// } -// -// int -// am_session_leader() -// { -// return (getsid(0) == getpid()); -// } -import "C" -import ( - "fmt" - "syscall" - "unsafe" - - "github.com/coreos/pkg/dlopen" -) - -var libsystemdNames = []string{ - // systemd < 209 - "libsystemd-login.so.0", - "libsystemd-login.so", - - // systemd >= 209 merged libsystemd-login into libsystemd proper - "libsystemd.so.0", - "libsystemd.so", -} - -func getRunningSlice() (slice string, err error) { - var h *dlopen.LibHandle - h, err = dlopen.GetHandle(libsystemdNames) - if err != nil { - return - } - defer func() { - if err1 := h.Close(); err1 != nil { - err = err1 - } - }() - - sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice") - if err != nil { - return - } - - var s string - sl := C.CString(s) - defer C.free(unsafe.Pointer(sl)) - - ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl) - if ret < 0 { - err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret)) - return - } - - return C.GoString(sl), nil -} - -func runningFromSystemService() (ret bool, err error) { - var h *dlopen.LibHandle - h, err = dlopen.GetHandle(libsystemdNames) - if err != nil { - return - } - defer func() { - if err1 := h.Close(); err1 != nil { - err = err1 - } - }() - - sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid") - if err != nil { - return - } - - var uid C.uid_t - errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid) - serrno := syscall.Errno(-errno) - // when we're running from a unit file, sd_pid_get_owner_uid returns - // ENOENT (systemd <220), ENXIO (systemd 220-223), or ENODATA - // (systemd >=234) - switch { - case errno >= 0: - ret = false - case serrno == syscall.ENOENT, serrno == syscall.ENXIO, serrno == syscall.ENODATA: - // Since the implementation of sessions in systemd relies on - // the `pam_systemd` module, using the sd_pid_get_owner_uid - // heuristic alone can result in false positives if that module - // (or PAM itself) is not present or properly configured on the - // system. As such, we also check if we're the session leader, - // which should be the case if we're invoked from a unit file, - // but not if e.g. 
we're invoked from the command line from a - // user's login session - ret = C.am_session_leader() == 1 - default: - err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno)) - } - return -} - -func currentUnitName() (unit string, err error) { - var h *dlopen.LibHandle - h, err = dlopen.GetHandle(libsystemdNames) - if err != nil { - return - } - defer func() { - if err1 := h.Close(); err1 != nil { - err = err1 - } - }() - - sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit") - if err != nil { - return - } - - var s string - u := C.CString(s) - defer C.free(unsafe.Pointer(u)) - - ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u) - if ret < 0 { - err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret)) - return - } - - unit = C.GoString(u) - return -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen.go deleted file mode 100644 index 23774f612e0..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package dlopen provides some convenience functions to dlopen a library and -// get its symbols. -package dlopen - -// #cgo LDFLAGS: -ldl -// #include -// #include -import "C" -import ( - "errors" - "fmt" - "unsafe" -) - -var ErrSoNotFound = errors.New("unable to open a handle to the library") - -// LibHandle represents an open handle to a library (.so) -type LibHandle struct { - Handle unsafe.Pointer - Libname string -} - -// GetHandle tries to get a handle to a library (.so), attempting to access it -// by the names specified in libs and returning the first that is successfully -// opened. Callers are responsible for closing the handler. If no library can -// be successfully opened, an error is returned. -func GetHandle(libs []string) (*LibHandle, error) { - for _, name := range libs { - libname := C.CString(name) - defer C.free(unsafe.Pointer(libname)) - handle := C.dlopen(libname, C.RTLD_LAZY) - if handle != nil { - h := &LibHandle{ - Handle: handle, - Libname: name, - } - return h, nil - } - } - return nil, ErrSoNotFound -} - -// GetSymbolPointer takes a symbol name and returns a pointer to the symbol. -func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) { - sym := C.CString(symbol) - defer C.free(unsafe.Pointer(sym)) - - C.dlerror() - p := C.dlsym(l.Handle, sym) - e := C.dlerror() - if e != nil { - return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e))) - } - - return p, nil -} - -// Close closes a LibHandle. 
-func (l *LibHandle) Close() error { - C.dlerror() - C.dlclose(l.Handle) - e := C.dlerror() - if e != nil { - return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e))) - } - - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go deleted file mode 100644 index 48a660104fb..00000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build linux - -package dlopen - -// #include -// #include -// -// int -// my_strlen(void *f, const char *s) -// { -// size_t (*strlen)(const char *); -// -// strlen = (size_t (*)(const char *))f; -// return strlen(s); -// } -import "C" - -import ( - "fmt" - "unsafe" -) - -func strlen(libs []string, s string) (int, error) { - h, err := GetHandle(libs) - if err != nil { - return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err) - } - defer h.Close() - - f := "strlen" - cs := C.CString(s) - defer C.free(unsafe.Pointer(cs)) - - strlen, err := h.GetSymbolPointer(f) - if err != nil { - return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err) - } - - len := C.my_strlen(strlen, cs) - - return int(len), nil -} diff --git a/cluster-autoscaler/vendor/github.com/docker/go-units/MAINTAINERS b/cluster-autoscaler/vendor/github.com/docker/go-units/MAINTAINERS index 9b3b6b101ef..4aac7c74110 100644 --- a/cluster-autoscaler/vendor/github.com/docker/go-units/MAINTAINERS +++ b/cluster-autoscaler/vendor/github.com/docker/go-units/MAINTAINERS @@ -27,7 +27,7 @@ [people.akihirosuda] Name = "Akihiro Suda" - Email = "suda.akihiro@lab.ntt.co.jp" + Email = "akihiro.suda.cz@hco.ntt.co.jp" GitHub = "AkihiroSuda" [people.dnephin] diff --git a/cluster-autoscaler/vendor/github.com/docker/go-units/circle.yml b/cluster-autoscaler/vendor/github.com/docker/go-units/circle.yml index 9043b35478c..af9d6055293 100644 --- a/cluster-autoscaler/vendor/github.com/docker/go-units/circle.yml +++ b/cluster-autoscaler/vendor/github.com/docker/go-units/circle.yml @@ -1,7 +1,7 @@ dependencies: post: # install golint - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint test: pre: diff --git a/cluster-autoscaler/vendor/github.com/docker/go-units/duration.go b/cluster-autoscaler/vendor/github.com/docker/go-units/duration.go index ba02af26dc5..48dd8744d43 100644 --- a/cluster-autoscaler/vendor/github.com/docker/go-units/duration.go +++ b/cluster-autoscaler/vendor/github.com/docker/go-units/duration.go @@ -18,7 +18,7 @@ func HumanDuration(d time.Duration) string { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" - } else if minutes < 46 { + } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours() + 0.5); hours == 1 { return "About an hour" diff --git 
a/cluster-autoscaler/vendor/github.com/docker/go-units/ulimit.go b/cluster-autoscaler/vendor/github.com/docker/go-units/ulimit.go index 5ac7fd825fc..fca0400cc82 100644 --- a/cluster-autoscaler/vendor/github.com/docker/go-units/ulimit.go +++ b/cluster-autoscaler/vendor/github.com/docker/go-units/ulimit.go @@ -96,8 +96,13 @@ func ParseUlimit(val string) (*Ulimit, error) { return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } } return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go index d36bec0e800..b6b7f2bb5e3 100644 --- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go +++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go @@ -145,3 +145,23 @@ const ( // addresses. SourceHashing = "sh" ) + +const ( + // ConnFwdMask is a mask for the fwd methods + ConnFwdMask = 0x0007 + + // ConnFwdMasq denotes forwarding via masquerading/NAT + ConnFwdMasq = 0x0000 + + // ConnFwdLocalNode denotes forwarding to a local node + ConnFwdLocalNode = 0x0001 + + // ConnFwdTunnel denotes forwarding via a tunnel + ConnFwdTunnel = 0x0002 + + // ConnFwdDirectRoute denotes forwarding via direct routing + ConnFwdDirectRoute = 0x0003 + + // ConnFwdBypass denotes forwarding while bypassing the cache + ConnFwdBypass = 0x0004 +) diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go index ab10717089e..61b6f0a5e4e 100644 --- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -3,14 +3,13 @@ package ipvs import ( + "fmt" "net" - "syscall" "time" - "fmt" - "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) const ( @@ -62,6 +61,17 @@ type Destination struct { LowerThreshold uint32 ActiveConnections int InactiveConnections int + Stats DstStats +} + +// DstStats defines IPVS destination (real server) statistics +type DstStats SvcStats + +// Config defines IPVS timeout configuration +type Config struct { + TimeoutTCP time.Duration + TimeoutTCPFin time.Duration + TimeoutUDP time.Duration } // Handle provides a namespace specific ipvs handle to program ipvs @@ -87,16 +97,16 @@ func New(path string) (*Handle, error) { } defer n.Close() - sock, err := nl.GetNetlinkSocketAt(n, netns.None(), syscall.NETLINK_GENERIC) + sock, err := nl.GetNetlinkSocketAt(n, netns.None(), unix.NETLINK_GENERIC) if err != nil { return nil, err } // Add operation timeout to avoid deadlocks - tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds()) + tv := unix.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds()) if err := sock.SetSendTimeout(&tv); err != nil { return nil, err } - tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds()) + tv = unix.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds()) if err := 
sock.SetReceiveTimeout(&tv); err != nil { return nil, err } @@ -184,3 +194,13 @@ func (i *Handle) GetService(s *Service) (*Service, error) { return res[0], nil } + +// GetConfig returns the current timeout configuration +func (i *Handle) GetConfig() (*Config, error) { + return i.doGetConfigCmd() +} + +// SetConfig set the current timeout configuration. 0: no change +func (i *Handle) SetConfig(c *Config) error { + return i.doSetConfigCmd(c) +} diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go index 3c7b1a562ae..083909ae050 100644 --- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go @@ -12,6 +12,7 @@ import ( "sync" "sync/atomic" "syscall" + "time" "unsafe" "github.com/sirupsen/logrus" @@ -443,6 +444,12 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) d.ActiveConnections = int(native.Uint16(attr.Value)) case ipvsDestAttrInactiveConnections: d.InactiveConnections = int(native.Uint16(attr.Value)) + case ipvsSvcAttrStats: + stats, err := assembleStats(attr.Value) + if err != nil { + return nil, err + } + d.Stats = DstStats(stats) } } return &d, nil @@ -497,6 +504,60 @@ func (i *Handle) doGetDestinationsCmd(s *Service, d *Destination) ([]*Destinatio return res, nil } +// parseConfig given a ipvs netlink response this function will respond with a valid config entry, an error otherwise +func (i *Handle) parseConfig(msg []byte) (*Config, error) { + var c Config + + //Remove General header for this message + hdr := deserializeGenlMsg(msg) + attrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + attrType := int(attr.Attr.Type) + switch attrType { + case ipvsCmdAttrTimeoutTCP: + c.TimeoutTCP = time.Duration(native.Uint32(attr.Value)) * time.Second + case ipvsCmdAttrTimeoutTCPFin: + c.TimeoutTCPFin = time.Duration(native.Uint32(attr.Value)) * time.Second + case ipvsCmdAttrTimeoutUDP: + c.TimeoutUDP = time.Duration(native.Uint32(attr.Value)) * time.Second + } + } + + return &c, nil +} + +// doGetConfigCmd a wrapper function to be used by GetConfig +func (i *Handle) doGetConfigCmd() (*Config, error) { + msg, err := i.doCmdWithoutAttr(ipvsCmdGetConfig) + if err != nil { + return nil, err + } + + res, err := i.parseConfig(msg[0]) + if err != nil { + return res, err + } + return res, nil +} + +// doSetConfigCmd a wrapper function to be used by SetConfig +func (i *Handle) doSetConfigCmd(c *Config) error { + req := newIPVSRequest(ipvsCmdSetConfig) + req.Seq = atomic.AddUint32(&i.seq, 1) + + req.AddData(nl.NewRtAttr(ipvsCmdAttrTimeoutTCP, nl.Uint32Attr(uint32(c.TimeoutTCP.Seconds())))) + req.AddData(nl.NewRtAttr(ipvsCmdAttrTimeoutTCPFin, nl.Uint32Attr(uint32(c.TimeoutTCPFin.Seconds())))) + req.AddData(nl.NewRtAttr(ipvsCmdAttrTimeoutUDP, nl.Uint32Attr(uint32(c.TimeoutUDP.Seconds())))) + + _, err := execute(i.sock, req, 0) + + return err +} + // IPVS related netlink message format explained /* EACH NETLINK MSG is of the below format, this is what we will receive from execute() api. 
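
The ipvs hunks above add a timeout-configuration API to the vendored libnetwork ipvs package: a Config struct (TimeoutTCP, TimeoutTCPFin, TimeoutUDP), Handle.GetConfig/SetConfig wrappers, and the generic-netlink plumbing behind them. The sketch below is illustrative only and is not part of this patch; it assumes that ipvs.New("") opens a handle in the current network namespace (New is only partially visible in these hunks) and that it runs on a Linux host with IPVS support and sufficient privileges (typically CAP_NET_ADMIN).

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/libnetwork/ipvs"
)

func main() {
	// Assumption: an empty path selects the current network namespace.
	h, err := ipvs.New("")
	if err != nil {
		log.Fatalf("ipvs handle: %v", err)
	}

	// Read the kernel's current IPVS timeouts via the newly added GetConfig.
	cfg, err := h.GetConfig()
	if err != nil {
		log.Fatalf("get config: %v", err)
	}
	fmt.Printf("tcp=%v tcpfin=%v udp=%v\n", cfg.TimeoutTCP, cfg.TimeoutTCPFin, cfg.TimeoutUDP)

	// Per the SetConfig doc comment in the diff, a zero value means "no change",
	// so only the TCP timeout is updated here.
	if err := h.SetConfig(&ipvs.Config{TimeoutTCP: 900 * time.Second}); err != nil {
		log.Fatalf("set config: %v", err)
	}
}
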
diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml index 930860e0a80..0e9d6edc010 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,8 +1,7 @@ language: go go: - - "1.3" - - "1.4" - - "1.10" + - 1.3 + - 1.4 script: - go test - go build diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go index 6e7f14fc7fb..4fb4054a8b7 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "io" "reflect" "strconv" @@ -27,19 +26,15 @@ func Marshal(o interface{}) ([]byte, error) { return y, nil } -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo, yaml.Unmarshal) + j, err := yamlToJSON(y, &vo) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = json.Unmarshal(j, o) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -47,21 +42,6 @@ func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { return nil } -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - // Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. @@ -80,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -90,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) -} - -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. 
-func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSON(y, nil) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := yaml.Unmarshal(y, &yamlObj) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a222a..00000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod index 422045df2d8..3e45e225b5d 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -1,6 +1,9 @@ module github.com/go-openapi/jsonpointer require ( - github.com/go-openapi/swag v0.19.2 + github.com/go-openapi/swag v0.19.5 + github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect github.com/stretchr/testify v1.3.0 ) + +go 1.13 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum index f5e28beb4bf..953d4f354e5 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -1,8 +1,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -10,6 +10,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go index fe2d6ee574f..b284eb77a62 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -135,7 +135,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam kv := reflect.ValueOf(decodedToken) mv := rValue.MapIndex(kv) - if mv.IsValid() && !swag.IsZero(mv) { + if mv.IsValid() { return mv.Interface(), kind, nil } return nil, kind, fmt.Errorf("object has no key %q", decodedToken) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod index 35adddfe40f..aff1d0163ec 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod @@ -3,8 +3,10 @@ module github.com/go-openapi/jsonreference require ( github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/jsonpointer v0.19.3 github.com/stretchr/testify v1.3.0 - golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect golang.org/x/text v0.3.2 // indirect ) + +go 1.13 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum index f1a7a34e3ce..c7ceab5802d 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum @@ -7,8 +7,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -16,6 +20,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 
h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -24,6 +30,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go index d5ec7b900a7..c67e2d877b0 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go @@ -21,7 +21,7 @@ import ( func bindataRead(data []byte, name string) ([]byte, error) { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %v", name, err) } var buf bytes.Buffer @@ -29,7 +29,7 @@ func bindataRead(data []byte, name string) ([]byte, error) { clErr := gz.Close() if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %v", name, err) } if clErr != nil { return nil, err @@ -85,7 +85,7 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} return a, nil } @@ -105,7 +105,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 
0x1f, 0x75, 0x57}} return a, nil } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod index 42073be0075..02a142c03c2 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod @@ -1,14 +1,17 @@ module github.com/go-openapi/spec require ( - github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/jsonpointer v0.19.3 github.com/go-openapi/jsonreference v0.19.2 - github.com/go-openapi/swag v0.19.2 + github.com/go-openapi/swag v0.19.5 github.com/kr/pty v1.1.5 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.3.0 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect gopkg.in/yaml.v2 v2.2.2 ) + +go 1.13 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum index 73e97a2d73e..86db601c978 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum @@ -13,6 +13,8 @@ github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzN github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= @@ -21,6 +23,8 @@ github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi88 github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -31,6 +35,8 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -47,6 +53,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema_loader.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema_loader.go index c34a96fa04e..9e20e96c2a1 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema_loader.go @@ -160,6 +160,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) if !fromCache { b, err := r.loadDoc(normalized) if err != nil { + debugLog("unable to load the document: %v", err) return nil, url.URL{}, false, err } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore index 5862205eec9..d69b53accc5 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore @@ -1,3 +1,4 @@ secrets.yml vendor Godeps +.idea diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go index 4e446ff703f..7da35c316e5 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go @@ -39,11 +39,12 @@ func IsFloat64AJSONInteger(f float64) bool { diff := math.Abs(f - g) // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases - if f == g { // best case + switch { + case f == g: // best case return true - } else if f == float64(int64(f)) || f == float64(uint64(f)) { // optimistic case + case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case return true - } else if f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64 { // very close to 0 values + case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values return diff < (epsilon * math.SmallestNonzeroFloat64) } // check the relative error diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go index 62ab15e5405..edf93d84c60 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go +++ 
b/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go @@ -99,7 +99,7 @@ func ConcatJSON(blobs ...[]byte) []byte { last := len(blobs) - 1 for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { // strips trailing null objects - last = last - 1 + last-- if last < 0 { // there was nothing but "null"s or nil... return nil diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go index 87488273dd6..9eac16afb2e 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go @@ -28,6 +28,14 @@ var initialisms []string var isInitialism func(string) bool +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// e.g. to help converting "123" into "{prefix}123" +// +// The default is to prefix with "X" +var GoNamePrefixFunc func(string) string + func init() { // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 var configuredInitialisms = map[string]bool{ @@ -288,8 +296,17 @@ func ToGoName(name string) string { } if len(result) > 0 { - if !unicode.IsUpper([]rune(result)[0]) { - result = "X" + result + // Only prefix with X when the first character isn't an ascii letter + first := []rune(result)[0] + if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { + if GoNamePrefixFunc == nil { + return "X" + result + } + result = GoNamePrefixFunc(name) + result + } + first = []rune(result)[0] + if unicode.IsLetter(first) && !unicode.IsUpper(first) { + result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) } } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go index 435e2948eb6..ec96914405b 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go @@ -143,19 +143,43 @@ func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { } func transformData(input interface{}) (out interface{}, err error) { + format := func(t interface{}) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T", k) + } + } + switch in := input.(type) { case yaml.MapSlice: o := make(JSONMapSlice, len(in)) for i, mi := range in { var nmi JSONMapItem - switch k := mi.Key.(type) { - case string: - nmi.Key = k - case int: - nmi.Key = strconv.Itoa(k) - default: - return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key) + if nmi.Key, err = format(mi.Key); err != nil { + return nil, err } v, ert := transformData(mi.Value) @@ -170,13 +194,8 @@ func transformData(input interface{}) (out interface{}, err error) { o := make(JSONMapSlice, 0, len(in)) for ke, va := range in 
{ var nmi JSONMapItem - switch k := ke.(type) { - case string: - nmi.Key = k - case int: - nmi.Key = strconv.Itoa(k) - default: - return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke) + if nmi.Key, err = format(ke); err != nil { + return nil, err } v, ert := transformData(va) diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/.travis.yml b/cluster-autoscaler/vendor/github.com/godbus/dbus/.travis.yml index 2e1bbb78c39..9cd57f432b0 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/.travis.yml @@ -4,8 +4,10 @@ go_import_path: github.com/godbus/dbus sudo: true go: - - 1.6.3 - 1.7.3 + - 1.8.7 + - 1.9.5 + - 1.10.1 - tip env: @@ -38,3 +40,7 @@ addons: - dbus-x11 before_install: + +script: + - go test -v -race ./... # Run all the tests with the race detector enabled + - go vet ./... # go vet is the official Go static analyzer diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/README.markdown b/cluster-autoscaler/vendor/github.com/godbus/dbus/README.markdown index d37f4e2ed8c..fd29648752d 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/README.markdown +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/README.markdown @@ -14,7 +14,7 @@ D-Bus message bus system. ### Installation -This packages requires Go 1.1. If you installed it and set up your GOPATH, just run: +This packages requires Go 1.7. If you installed it and set up your GOPATH, just run: ``` go get github.com/godbus/dbus diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/auth.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/auth.go index 98017b693ee..b0dcb54e6b5 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/auth.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/auth.go @@ -116,7 +116,6 @@ func (conn *Conn) Auth(methods []Auth) error { return err } go conn.inWorker() - go conn.outWorker() return nil } } diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/auth_anonymous.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/auth_anonymous.go new file mode 100644 index 00000000000..75f3ad34d7a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/auth_anonymous.go @@ -0,0 +1,16 @@ +package dbus + +// AuthAnonymous returns an Auth that uses the ANONYMOUS mechanism. +func AuthAnonymous() Auth { + return &authAnonymous{} +} + +type authAnonymous struct{} + +func (a *authAnonymous) FirstData() (name, resp []byte, status AuthStatus) { + return []byte("ANONYMOUS"), nil, AuthOk +} + +func (a *authAnonymous) HandleData(data []byte) (resp []byte, status AuthStatus) { + return nil, AuthError +} diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/call.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/call.go index ba6e73f607a..2cb189012e7 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/call.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/call.go @@ -1,9 +1,12 @@ package dbus import ( + "context" "errors" ) +var errSignature = errors.New("dbus: mismatched signature") + // Call represents a pending or completed method call. type Call struct { Destination string @@ -20,9 +23,25 @@ type Call struct { // Holds the response once the call is done. 
Body []interface{} + + // tracks context and canceler + ctx context.Context + ctxCanceler context.CancelFunc } -var errSignature = errors.New("dbus: mismatched signature") +func (c *Call) Context() context.Context { + if c.ctx == nil { + return context.Background() + } + + return c.ctx +} + +func (c *Call) ContextCancel() { + if c.ctxCanceler != nil { + c.ctxCanceler() + } +} // Store stores the body of the reply into the provided pointers. It returns // an error if the signatures of the body and retvalues don't match, or if @@ -34,3 +53,8 @@ func (c *Call) Store(retvalues ...interface{}) error { return Store(c.Body, retvalues...) } + +func (c *Call) done() { + c.Done <- c + c.ContextCancel() +} diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn.go index 5720e2ebbf0..b38920baf5b 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn.go @@ -1,6 +1,7 @@ package dbus import ( + "context" "errors" "io" "os" @@ -14,7 +15,6 @@ var ( systemBusLck sync.Mutex sessionBus *Conn sessionBusLck sync.Mutex - sessionEnvLck sync.Mutex ) // ErrClosed is the error returned by calls on a closed connection. @@ -35,23 +35,13 @@ type Conn struct { unixFD bool uuid string - names []string - namesLck sync.RWMutex - - serialLck sync.Mutex - nextSerial uint32 - serialUsed map[uint32]bool - - calls map[uint32]*Call - callsLck sync.RWMutex - - handler Handler - - out chan *Message - closed bool - outLck sync.RWMutex - + handler Handler signalHandler SignalHandler + serialGen SerialGenerator + + names *nameTracker + calls *callTracker + outHandler *outputHandler eavesdropped chan<- *Message eavesdroppedLck sync.Mutex @@ -87,32 +77,31 @@ func SessionBus() (conn *Conn, err error) { } func getSessionBusAddress() (string, error) { - sessionEnvLck.Lock() - defer sessionEnvLck.Unlock() - address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") - if address != "" && address != "autolaunch:" { + if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" { + return address, nil + + } else if address := tryDiscoverDbusSessionBusAddress(); address != "" { + os.Setenv("DBUS_SESSION_BUS_ADDRESS", address) return address, nil } return getSessionBusPlatformAddress() } // SessionBusPrivate returns a new private connection to the session bus. -func SessionBusPrivate() (*Conn, error) { +func SessionBusPrivate(opts ...ConnOption) (*Conn, error) { address, err := getSessionBusAddress() if err != nil { return nil, err } - return Dial(address) + return Dial(address, opts...) } // SessionBusPrivate returns a new private connection to the session bus. +// +// Deprecated: use SessionBusPrivate with options instead. func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { - address, err := getSessionBusAddress() - if err != nil { - return nil, err - } - return DialHandler(address, handler, signalHandler) + return SessionBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler)) } // SystemBus returns a shared connection to the system bus, connecting to it if @@ -145,53 +134,93 @@ func SystemBus() (conn *Conn, err error) { } // SystemBusPrivate returns a new private connection to the system bus. -func SystemBusPrivate() (*Conn, error) { - return Dial(getSystemBusPlatformAddress()) +func SystemBusPrivate(opts ...ConnOption) (*Conn, error) { + return Dial(getSystemBusPlatformAddress(), opts...) 
} // SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers. +// +// Deprecated: use SystemBusPrivate with options instead. func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { - return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler) + return SystemBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler)) } // Dial establishes a new private connection to the message bus specified by address. -func Dial(address string) (*Conn, error) { +func Dial(address string, opts ...ConnOption) (*Conn, error) { tr, err := getTransport(address) if err != nil { return nil, err } - return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler()) + return newConn(tr, opts...) } // DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers. +// +// Deprecated: use Dial with options instead. func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { - tr, err := getTransport(address) - if err != nil { - return nil, err + return Dial(address, WithSignalHandler(signalHandler)) +} + +// ConnOption is a connection option. +type ConnOption func(conn *Conn) error + +// WithHandler overrides the default handler. +func WithHandler(handler Handler) ConnOption { + return func(conn *Conn) error { + conn.handler = handler + return nil + } +} + +// WithSignalHandler overrides the default signal handler. +func WithSignalHandler(handler SignalHandler) ConnOption { + return func(conn *Conn) error { + conn.signalHandler = handler + return nil + } +} + +// WithSerialGenerator overrides the default signals generator. +func WithSerialGenerator(gen SerialGenerator) ConnOption { + return func(conn *Conn) error { + conn.serialGen = gen + return nil } - return newConn(tr, handler, signalHandler) } // NewConn creates a new private *Conn from an already established connection. -func NewConn(conn io.ReadWriteCloser) (*Conn, error) { - return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler()) +func NewConn(conn io.ReadWriteCloser, opts ...ConnOption) (*Conn, error) { + return newConn(genericTransport{conn}, opts...) } // NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers. +// +// Deprecated: use NewConn with options instead. func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) { - return newConn(genericTransport{conn}, handler, signalHandler) + return NewConn(genericTransport{conn}, WithHandler(handler), WithSignalHandler(signalHandler)) } // newConn creates a new *Conn from a transport. 
-func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) { +func newConn(tr transport, opts ...ConnOption) (*Conn, error) { conn := new(Conn) conn.transport = tr - conn.calls = make(map[uint32]*Call) - conn.out = make(chan *Message, 10) - conn.handler = handler - conn.signalHandler = signalHandler - conn.nextSerial = 1 - conn.serialUsed = map[uint32]bool{0: true} + for _, opt := range opts { + if err := opt(conn); err != nil { + return nil, err + } + } + conn.calls = newCallTracker() + if conn.handler == nil { + conn.handler = NewDefaultHandler() + } + if conn.signalHandler == nil { + conn.signalHandler = NewDefaultSignalHandler() + } + if conn.serialGen == nil { + conn.serialGen = newSerialGenerator() + } + conn.outHandler = &outputHandler{conn: conn} + conn.names = newNameTracker() conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") return conn, nil } @@ -206,18 +235,7 @@ func (conn *Conn) BusObject() BusObject { // and the channels passed to Eavesdrop and Signal are closed. This method must // not be called on shared connections. func (conn *Conn) Close() error { - conn.outLck.Lock() - if conn.closed { - // inWorker calls Close on read error, the read error may - // be caused by another caller calling Close to shutdown the - // dbus connection, a double-close scenario we prevent here. - conn.outLck.Unlock() - return nil - } - close(conn.out) - conn.closed = true - conn.outLck.Unlock() - + conn.outHandler.close() if term, ok := conn.signalHandler.(Terminator); ok { term.Terminate() } @@ -249,17 +267,9 @@ func (conn *Conn) Eavesdrop(ch chan<- *Message) { conn.eavesdroppedLck.Unlock() } -// getSerial returns an unused serial. +// GetSerial returns an unused serial. func (conn *Conn) getSerial() uint32 { - conn.serialLck.Lock() - defer conn.serialLck.Unlock() - n := conn.nextSerial - for conn.serialUsed[n] { - n++ - } - conn.serialUsed[n] = true - conn.nextSerial = n + 1 - return n + return conn.serialGen.GetSerial() } // Hello sends the initial org.freedesktop.DBus.Hello call. This method must be @@ -271,10 +281,7 @@ func (conn *Conn) Hello() error { if err != nil { return err } - conn.namesLck.Lock() - conn.names = make([]string, 1) - conn.names[0] = s - conn.namesLck.Unlock() + conn.names.acquireUniqueConnectionName(s) return nil } @@ -283,109 +290,48 @@ func (conn *Conn) Hello() error { func (conn *Conn) inWorker() { for { msg, err := conn.ReadMessage() - if err == nil { - conn.eavesdroppedLck.Lock() - if conn.eavesdropped != nil { - select { - case conn.eavesdropped <- msg: - default: - } - conn.eavesdroppedLck.Unlock() - continue - } - conn.eavesdroppedLck.Unlock() - dest, _ := msg.Headers[FieldDestination].value.(string) - found := false - if dest == "" { - found = true - } else { - conn.namesLck.RLock() - if len(conn.names) == 0 { - found = true - } - for _, v := range conn.names { - if dest == v { - found = true - break - } - } - conn.namesLck.RUnlock() - } - if !found { - // Eavesdropped a message, but no channel for it is registered. - // Ignore it. 
- continue - } - switch msg.Type { - case TypeMethodReply, TypeError: - serial := msg.Headers[FieldReplySerial].value.(uint32) - conn.callsLck.Lock() - if c, ok := conn.calls[serial]; ok { - if msg.Type == TypeError { - name, _ := msg.Headers[FieldErrorName].value.(string) - c.Err = Error{name, msg.Body} - } else { - c.Body = msg.Body - } - c.Done <- c - conn.serialLck.Lock() - delete(conn.serialUsed, serial) - conn.serialLck.Unlock() - delete(conn.calls, serial) - } - conn.callsLck.Unlock() - case TypeSignal: - iface := msg.Headers[FieldInterface].value.(string) - member := msg.Headers[FieldMember].value.(string) - // as per http://dbus.freedesktop.org/doc/dbus-specification.html , - // sender is optional for signals. - sender, _ := msg.Headers[FieldSender].value.(string) - if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" { - if member == "NameLost" { - // If we lost the name on the bus, remove it from our - // tracking list. - name, ok := msg.Body[0].(string) - if !ok { - panic("Unable to read the lost name") - } - conn.namesLck.Lock() - for i, v := range conn.names { - if v == name { - conn.names = append(conn.names[:i], - conn.names[i+1:]...) - } - } - conn.namesLck.Unlock() - } else if member == "NameAcquired" { - // If we acquired the name on the bus, add it to our - // tracking list. - name, ok := msg.Body[0].(string) - if !ok { - panic("Unable to read the acquired name") - } - conn.namesLck.Lock() - conn.names = append(conn.names, name) - conn.namesLck.Unlock() - } - } - conn.handleSignal(msg) - case TypeMethodCall: - go conn.handleCall(msg) + if err != nil { + if _, ok := err.(InvalidMessageError); !ok { + // Some read error occured (usually EOF); we can't really do + // anything but to shut down all stuff and returns errors to all + // pending replies. + conn.Close() + conn.calls.finalizeAllWithError(err) + return } - } else if _, ok := err.(InvalidMessageError); !ok { - // Some read error occured (usually EOF); we can't really do - // anything but to shut down all stuff and returns errors to all - // pending replies. - conn.Close() - conn.callsLck.RLock() - for _, v := range conn.calls { - v.Err = err - v.Done <- v + // invalid messages are ignored + continue + } + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + select { + case conn.eavesdropped <- msg: + default: } - conn.callsLck.RUnlock() - return + conn.eavesdroppedLck.Unlock() + continue + } + conn.eavesdroppedLck.Unlock() + dest, _ := msg.Headers[FieldDestination].value.(string) + found := dest == "" || + !conn.names.uniqueNameIsKnown() || + conn.names.isKnownName(dest) + if !found { + // Eavesdropped a message, but no channel for it is registered. + // Ignore it. + continue + } + switch msg.Type { + case TypeError: + conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg)) + case TypeMethodReply: + conn.serialGen.RetireSerial(conn.calls.handleReply(msg)) + case TypeSignal: + conn.handleSignal(msg) + case TypeMethodCall: + go conn.handleCall(msg) } - // invalid messages are ignored + } } @@ -395,6 +341,25 @@ func (conn *Conn) handleSignal(msg *Message) { // as per http://dbus.freedesktop.org/doc/dbus-specification.html , // sender is optional for signals. sender, _ := msg.Headers[FieldSender].value.(string) + if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" { + if member == "NameLost" { + // If we lost the name on the bus, remove it from our + // tracking list. 
+ name, ok := msg.Body[0].(string) + if !ok { + panic("Unable to read the lost name") + } + conn.names.loseName(name) + } else if member == "NameAcquired" { + // If we acquired the name on the bus, add it to our + // tracking list. + name, ok := msg.Body[0].(string) + if !ok { + panic("Unable to read the acquired name") + } + conn.names.acquireName(name) + } + } signal := &Signal{ Sender: sender, Path: msg.Headers[FieldPath].value.(ObjectPath), @@ -408,12 +373,7 @@ func (conn *Conn) handleSignal(msg *Message) { // connection. The slice is always at least one element long, the first element // being the unique name of the connection. func (conn *Conn) Names() []string { - conn.namesLck.RLock() - // copy the slice so it can't be modified - s := make([]string, len(conn.names)) - copy(s, conn.names) - conn.namesLck.RUnlock() - return s + return conn.names.listKnownNames() } // Object returns the object identified by the given destination name and path. @@ -423,24 +383,17 @@ func (conn *Conn) Object(dest string, path ObjectPath) BusObject { // outWorker runs in an own goroutine, encoding and sending messages that are // sent to conn.out. -func (conn *Conn) outWorker() { - for msg := range conn.out { - err := conn.SendMessage(msg) - conn.callsLck.RLock() - if err != nil { - if c := conn.calls[msg.serial]; c != nil { - c.Err = err - c.Done <- c - } - conn.serialLck.Lock() - delete(conn.serialUsed, msg.serial) - conn.serialLck.Unlock() - } else if msg.Type != TypeMethodCall { - conn.serialLck.Lock() - delete(conn.serialUsed, msg.serial) - conn.serialLck.Unlock() - } - conn.callsLck.RUnlock() +func (conn *Conn) sendMessage(msg *Message) { + conn.sendMessageAndIfClosed(msg, func() {}) +} + +func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { + err := conn.outHandler.sendAndIfClosed(msg, ifClosed) + conn.calls.handleSendError(msg, err) + if err != nil { + conn.serialGen.RetireSerial(msg.serial) + } else if msg.Type != TypeMethodCall { + conn.serialGen.RetireSerial(msg.serial) } } @@ -451,8 +404,21 @@ func (conn *Conn) outWorker() { // once the call is complete. Otherwise, ch is ignored and a Call structure is // returned of which only the Err member is valid. func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { - var call *Call + return conn.send(context.Background(), msg, ch) +} +// SendWithContext acts like Send but takes a context +func (conn *Conn) SendWithContext(ctx context.Context, msg *Message, ch chan *Call) *Call { + return conn.send(ctx, msg, ch) +} + +func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { + if ctx == nil { + panic("nil context") + } + + var call *Call + ctx, canceler := context.WithCancel(ctx) msg.serial = conn.getSerial() if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { if ch == nil { @@ -468,26 +434,23 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { call.Method = iface + "." 
+ member call.Args = msg.Body call.Done = ch - conn.callsLck.Lock() - conn.calls[msg.serial] = call - conn.callsLck.Unlock() - conn.outLck.RLock() - if conn.closed { - call.Err = ErrClosed - call.Done <- call - } else { - conn.out <- msg - } - conn.outLck.RUnlock() + call.ctx = ctx + call.ctxCanceler = canceler + conn.calls.track(msg.serial, call) + go func() { + <-ctx.Done() + conn.calls.handleSendError(msg, ctx.Err()) + }() + conn.sendMessageAndIfClosed(msg, func() { + conn.calls.handleSendError(msg, ErrClosed) + canceler() + }) } else { - conn.outLck.RLock() - if conn.closed { + canceler() + call = &Call{Err: nil} + conn.sendMessageAndIfClosed(msg, func() { call = &Call{Err: ErrClosed} - } else { - conn.out <- msg - call = &Call{Err: nil} - } - conn.outLck.RUnlock() + }) } return call } @@ -520,11 +483,7 @@ func (conn *Conn) sendError(err error, dest string, serial uint32) { if len(e.Body) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) } - conn.outLck.RLock() - if !conn.closed { - conn.out <- msg - } - conn.outLck.RUnlock() + conn.sendMessage(msg) } // sendReply creates a method reply message corresponding to the parameters and @@ -542,11 +501,7 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } - conn.outLck.RLock() - if !conn.closed { - conn.out <- msg - } - conn.outLck.RUnlock() + conn.sendMessage(msg) } func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) { @@ -681,3 +636,212 @@ func getKey(s, key string) string { } return "" } + +type outputHandler struct { + conn *Conn + sendLck sync.Mutex + closed struct { + isClosed bool + lck sync.RWMutex + } +} + +func (h *outputHandler) sendAndIfClosed(msg *Message, ifClosed func()) error { + h.closed.lck.RLock() + defer h.closed.lck.RUnlock() + if h.closed.isClosed { + ifClosed() + return nil + } + h.sendLck.Lock() + defer h.sendLck.Unlock() + return h.conn.SendMessage(msg) +} + +func (h *outputHandler) close() { + h.closed.lck.Lock() + defer h.closed.lck.Unlock() + h.closed.isClosed = true +} + +type serialGenerator struct { + lck sync.Mutex + nextSerial uint32 + serialUsed map[uint32]bool +} + +func newSerialGenerator() *serialGenerator { + return &serialGenerator{ + serialUsed: map[uint32]bool{0: true}, + nextSerial: 1, + } +} + +func (gen *serialGenerator) GetSerial() uint32 { + gen.lck.Lock() + defer gen.lck.Unlock() + n := gen.nextSerial + for gen.serialUsed[n] { + n++ + } + gen.serialUsed[n] = true + gen.nextSerial = n + 1 + return n +} + +func (gen *serialGenerator) RetireSerial(serial uint32) { + gen.lck.Lock() + defer gen.lck.Unlock() + delete(gen.serialUsed, serial) +} + +type nameTracker struct { + lck sync.RWMutex + unique string + names map[string]struct{} +} + +func newNameTracker() *nameTracker { + return &nameTracker{names: map[string]struct{}{}} +} +func (tracker *nameTracker) acquireUniqueConnectionName(name string) { + tracker.lck.Lock() + defer tracker.lck.Unlock() + tracker.unique = name +} +func (tracker *nameTracker) acquireName(name string) { + tracker.lck.Lock() + defer tracker.lck.Unlock() + tracker.names[name] = struct{}{} +} +func (tracker *nameTracker) loseName(name string) { + tracker.lck.Lock() + defer tracker.lck.Unlock() + delete(tracker.names, name) +} + +func (tracker *nameTracker) uniqueNameIsKnown() bool { + tracker.lck.RLock() + defer tracker.lck.RUnlock() + return tracker.unique != "" +} +func (tracker 
*nameTracker) isKnownName(name string) bool { + tracker.lck.RLock() + defer tracker.lck.RUnlock() + _, ok := tracker.names[name] + return ok || name == tracker.unique +} +func (tracker *nameTracker) listKnownNames() []string { + tracker.lck.RLock() + defer tracker.lck.RUnlock() + out := make([]string, 0, len(tracker.names)+1) + out = append(out, tracker.unique) + for k := range tracker.names { + out = append(out, k) + } + return out +} + +type callTracker struct { + calls map[uint32]*Call + lck sync.RWMutex +} + +func newCallTracker() *callTracker { + return &callTracker{calls: map[uint32]*Call{}} +} + +func (tracker *callTracker) track(sn uint32, call *Call) { + tracker.lck.Lock() + tracker.calls[sn] = call + tracker.lck.Unlock() +} + +func (tracker *callTracker) handleReply(msg *Message) uint32 { + serial := msg.Headers[FieldReplySerial].value.(uint32) + tracker.lck.RLock() + _, ok := tracker.calls[serial] + tracker.lck.RUnlock() + if ok { + tracker.finalizeWithBody(serial, msg.Body) + } + return serial +} + +func (tracker *callTracker) handleDBusError(msg *Message) uint32 { + serial := msg.Headers[FieldReplySerial].value.(uint32) + tracker.lck.RLock() + _, ok := tracker.calls[serial] + tracker.lck.RUnlock() + if ok { + name, _ := msg.Headers[FieldErrorName].value.(string) + tracker.finalizeWithError(serial, Error{name, msg.Body}) + } + return serial +} + +func (tracker *callTracker) handleSendError(msg *Message, err error) { + if err == nil { + return + } + tracker.lck.RLock() + _, ok := tracker.calls[msg.serial] + tracker.lck.RUnlock() + if ok { + tracker.finalizeWithError(msg.serial, err) + } +} + +// finalize was the only func that did not strobe Done +func (tracker *callTracker) finalize(sn uint32) { + tracker.lck.Lock() + defer tracker.lck.Unlock() + c, ok := tracker.calls[sn] + if ok { + delete(tracker.calls, sn) + c.ContextCancel() + } + return +} + +func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) { + tracker.lck.Lock() + c, ok := tracker.calls[sn] + if ok { + delete(tracker.calls, sn) + } + tracker.lck.Unlock() + if ok { + c.Body = body + c.done() + } + return +} + +func (tracker *callTracker) finalizeWithError(sn uint32, err error) { + tracker.lck.Lock() + c, ok := tracker.calls[sn] + if ok { + delete(tracker.calls, sn) + } + tracker.lck.Unlock() + if ok { + c.Err = err + c.done() + } + return +} + +func (tracker *callTracker) finalizeAllWithError(err error) { + tracker.lck.Lock() + closedCalls := make([]*Call, 0, len(tracker.calls)) + for sn := range tracker.calls { + closedCalls = append(closedCalls, tracker.calls[sn]) + } + tracker.calls = map[uint32]*Call{} + tracker.lck.Unlock() + for _, call := range closedCalls { + call.Err = err + call.done() + } +} diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_darwin.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_darwin.go index c015f80ce7e..6e2e4020216 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_darwin.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_darwin.go @@ -31,3 +31,7 @@ func getSystemBusPlatformAddress() string { } return defaultSystemBusAddress } + +func tryDiscoverDbusSessionBusAddress() string { + return "" +} diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_other.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_other.go index 254c9f2ef68..289044a44d4 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_other.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_other.go @@ 
-6,12 +6,14 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"os/exec"
+	"os/user"
+	"path"
+	"strings"
 )
 
-const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
-
 func getSessionBusPlatformAddress() (string, error) {
 	cmd := exec.Command("dbus-launch")
 	b, err := cmd.CombinedOutput()
@@ -33,10 +35,57 @@ func getSessionBusPlatformAddress() (string, error) {
 	return addr, nil
 }
 
-func getSystemBusPlatformAddress() string {
-	address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
-	if address != "" {
-		return fmt.Sprintf("unix:path=%s", address)
+// tryDiscoverDbusSessionBusAddress tries to discover an existing dbus session
+// and return the value of its DBUS_SESSION_BUS_ADDRESS.
+// It tries different techniques employed by different operating systems,
+// returning the first valid address it finds, or an empty string.
+//
+// * /run/user//bus if this exists, it *is* the bus socket. present on
+//   Ubuntu 18.04
+// * /run/user//dbus-session: if this exists, it can be parsed for the bus
+//   address. present on Ubuntu 16.04
+//
+// See https://dbus.freedesktop.org/doc/dbus-launch.1.html
+func tryDiscoverDbusSessionBusAddress() string {
+	if runtimeDirectory, err := getRuntimeDirectory(); err == nil {
+
+		if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
+			// if /run/user//bus exists, that file itself
+			// *is* the unix socket, so return its path
+			return fmt.Sprintf("unix:path=%s", runUserBusFile)
+		}
+		if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
+			// if /run/user//dbus-session exists, it's a
+			// text file // containing the address of the socket, e.g.:
+			// DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG
+
+			if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil {
+				fileContent := string(f)
+
+				prefix := "DBUS_SESSION_BUS_ADDRESS="
+
+				if strings.HasPrefix(fileContent, prefix) {
+					address := strings.TrimRight(strings.TrimPrefix(fileContent, prefix), "\n\r")
+					return address
+				}
+			}
+		}
+	}
+	return ""
+}
+
+func getRuntimeDirectory() (string, error) {
+	if currentUser, err := user.Current(); err != nil {
+		return "", err
+	} else {
+		return fmt.Sprintf("/run/user/%s", currentUser.Uid), nil
+	}
+}
+
+func fileExists(filename string) bool {
+	if _, err := os.Stat(filename); !os.IsNotExist(err) {
+		return true
+	} else {
+		return false
 	}
-	return defaultSystemBusAddress
 }
diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_unix.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_unix.go
new file mode 100644
index 00000000000..4cba8ae8ec3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_unix.go
@@ -0,0 +1,18 @@
+//+build !windows,!solaris,!darwin
+
+package dbus
+
+import (
+	"os"
+	"fmt"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+func getSystemBusPlatformAddress() string {
+	address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+	if address != "" {
+		return fmt.Sprintf("unix:path=%s", address)
+	}
+	return defaultSystemBusAddress
+}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_windows.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_windows.go
new file mode 100644
index 00000000000..4291e4519cc
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/conn_windows.go
@@ -0,0 +1,15 @@
+//+build windows
+
+package dbus
+
+import "os"
+
+const defaultSystemBusAddress = "tcp:host=127.0.0.1,port=12434"
+
+func
getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return address + } + return defaultSystemBusAddress +} diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/decoder.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/decoder.go index ef50dcab98d..5c27d3b51f6 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/decoder.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/decoder.go @@ -191,7 +191,14 @@ func (dec *decoder) decode(s string, depth int) interface{} { length := dec.decode("u", depth).(uint32) v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) // Even for empty arrays, the correct padding must be included - dec.align(alignment(typeFor(s[1:]))) + align := alignment(typeFor(s[1:])) + if len(s) > 1 && s[1] == '(' { + //Special case for arrays of structs + //structs decode as a slice of interface{} values + //but the dbus alignment does not match this + align = 8 + } + dec.align(align) spos := dec.pos for dec.pos < spos+int(length) { ev := dec.decode(s[1:], depth+1) diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/default_handler.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/default_handler.go index e81f73ac522..81dbcc7e42a 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/default_handler.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/default_handler.go @@ -21,6 +21,8 @@ func newIntrospectIntf(h *defaultHandler) *exportedIntf { //NewDefaultHandler returns an instance of the default //call handler. This is useful if you want to implement only //one of the two handlers but not both. +// +// Deprecated: this is the default value, don't use it, it will be unexported. func NewDefaultHandler() *defaultHandler { h := &defaultHandler{ objects: make(map[ObjectPath]*exportedObj), @@ -161,6 +163,7 @@ func newExportedObject() *exportedObj { } type exportedObj struct { + mu sync.RWMutex interfaces map[string]*exportedIntf } @@ -168,19 +171,27 @@ func (obj *exportedObj) LookupInterface(name string) (Interface, bool) { if name == "" { return obj, true } + obj.mu.RLock() + defer obj.mu.RUnlock() intf, exists := obj.interfaces[name] return intf, exists } func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) { + obj.mu.Lock() + defer obj.mu.Unlock() obj.interfaces[name] = iface } func (obj *exportedObj) DeleteInterface(name string) { + obj.mu.Lock() + defer obj.mu.Unlock() delete(obj.interfaces, name) } func (obj *exportedObj) LookupMethod(name string) (Method, bool) { + obj.mu.RLock() + defer obj.mu.RUnlock() for _, intf := range obj.interfaces { method, exists := intf.LookupMethod(name) if exists { @@ -220,8 +231,12 @@ func (obj *exportedIntf) isFallbackInterface() bool { //NewDefaultSignalHandler returns an instance of the default //signal handler. This is useful if you want to implement only //one of the two handlers but not both. +// +// Deprecated: this is the default value, don't use it, it will be unexported. 
func NewDefaultSignalHandler() *defaultSignalHandler { - return &defaultSignalHandler{} + return &defaultSignalHandler{ + closeChan: make(chan struct{}), + } } func isDefaultSignalHandler(handler SignalHandler) bool { @@ -231,32 +246,47 @@ func isDefaultSignalHandler(handler SignalHandler) bool { type defaultSignalHandler struct { sync.RWMutex - closed bool - signals []chan<- *Signal + closed bool + signals []chan<- *Signal + closeChan chan struct{} } func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) { - go func() { - sh.RLock() - defer sh.RUnlock() - if sh.closed { + sh.RLock() + defer sh.RUnlock() + if sh.closed { + return + } + for _, ch := range sh.signals { + select { + case ch <- signal: + case <-sh.closeChan: return + default: + go func() { + select { + case ch <- signal: + case <-sh.closeChan: + return + } + }() } - for _, ch := range sh.signals { - ch <- signal - } - }() + } } func (sh *defaultSignalHandler) Init() error { sh.Lock() sh.signals = make([]chan<- *Signal, 0) + sh.closeChan = make(chan struct{}) sh.Unlock() return nil } func (sh *defaultSignalHandler) Terminate() { sh.Lock() + if !sh.closed { + close(sh.closeChan) + } sh.closed = true for _, ch := range sh.signals { close(ch) diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/export.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/export.go index aae97088152..95d0e29582d 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/export.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/export.go @@ -170,11 +170,8 @@ func (conn *Conn) handleCall(msg *Message) { reply.Body[i] = ret[i] } reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) - conn.outLck.RLock() - if !conn.closed { - conn.out <- reply - } - conn.outLck.RUnlock() + + conn.sendMessage(reply) } } @@ -207,12 +204,14 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } - conn.outLck.RLock() - defer conn.outLck.RUnlock() - if conn.closed { + + var closed bool + conn.sendMessageAndIfClosed(msg, func() { + closed = true + }) + if closed { return ErrClosed } - conn.out <- msg return nil } diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/go.mod b/cluster-autoscaler/vendor/github.com/godbus/dbus/go.mod new file mode 100644 index 00000000000..bdcd12598b4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/go.mod @@ -0,0 +1 @@ +module github.com/godbus/dbus diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/object.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/object.go index 6d95583d767..f27ffe144a8 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/object.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/object.go @@ -1,6 +1,7 @@ package dbus import ( + "context" "errors" "strings" ) @@ -9,7 +10,11 @@ import ( // invoked. 
type BusObject interface { Call(method string, flags Flags, args ...interface{}) *Call + CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call + GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call + AddMatchSignal(iface, member string, options ...MatchOption) *Call + RemoveMatchSignal(iface, member string, options ...MatchOption) *Call GetProperty(p string) (Variant, error) Destination() string Path() ObjectPath @@ -24,16 +29,73 @@ type Object struct { // Call calls a method with (*Object).Go and waits for its reply. func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { - return <-o.Go(method, flags, make(chan *Call, 1), args...).Done + return <-o.createCall(context.Background(), method, flags, make(chan *Call, 1), args...).Done } -// AddMatchSignal subscribes BusObject to signals from specified interface and -// method (member). -func (o *Object) AddMatchSignal(iface, member string) *Call { - return o.Call( +// CallWithContext acts like Call but takes a context +func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call { + return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done +} + +// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers. +// For full list of available options consult +// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules +type MatchOption struct { + key string + value string +} + +// WithMatchOption creates match option with given key and value +func WithMatchOption(key, value string) MatchOption { + return MatchOption{key, value} +} + +// WithMatchObjectPath creates match option that filters events based on given path +func WithMatchObjectPath(path ObjectPath) MatchOption { + return MatchOption{"path", string(path)} +} + +func formatMatchOptions(options []MatchOption) string { + items := make([]string, 0, len(options)) + for _, option := range options { + items = append(items, option.key+"='"+option.value+"'") + } + + return strings.Join(items, ",") +} + +// AddMatchSignal subscribes BusObject to signals from specified interface, +// method (member). Additional filter rules can be added via WithMatch* option constructors. +// Note: To filter events by object path you have to specify this path via an option. +func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call { + base := []MatchOption{ + {"type", "signal"}, + {"interface", iface}, + {"member", member}, + } + + options = append(base, options...) + return o.conn.BusObject().Call( "org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='"+iface+"',member='"+member+"'", + formatMatchOptions(options), + ) +} + +// RemoveMatchSignal unsubscribes BusObject from signals from specified interface, +// method (member). Additional filter rules can be added via WithMatch* option constructors +func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call { + base := []MatchOption{ + {"type", "signal"}, + {"interface", iface}, + {"member", member}, + } + + options = append(base, options...) 
+ return o.conn.BusObject().Call( + "org.freedesktop.DBus.RemoveMatch", + 0, + formatMatchOptions(options), ) } @@ -49,6 +111,18 @@ func (o *Object) AddMatchSignal(iface, member string) *Call { // If the method parameter contains a dot ('.'), the part before the last dot // specifies the interface on which the method is called. func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + return o.createCall(context.Background(), method, flags, ch, args...) +} + +// GoWithContext acts like Go but takes a context +func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + return o.createCall(ctx, method, flags, ch, args...) +} + +func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + if ctx == nil { + panic("nil context") + } iface := "" i := strings.LastIndex(method, ".") if i != -1 { @@ -76,28 +150,28 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface } else if cap(ch) == 0 { panic("dbus: unbuffered channel passed to (*Object).Go") } + ctx, cancel := context.WithCancel(ctx) call := &Call{ Destination: o.dest, Path: o.path, Method: method, Args: args, Done: ch, + ctxCanceler: cancel, + ctx: ctx, } - o.conn.callsLck.Lock() - o.conn.calls[msg.serial] = call - o.conn.callsLck.Unlock() - o.conn.outLck.RLock() - if o.conn.closed { - call.Err = ErrClosed - call.Done <- call - } else { - o.conn.out <- msg - } - o.conn.outLck.RUnlock() + o.conn.calls.track(msg.serial, call) + o.conn.sendMessageAndIfClosed(msg, func() { + o.conn.calls.handleSendError(msg, ErrClosed) + cancel() + }) + go func() { + <-ctx.Done() + o.conn.calls.handleSendError(msg, ctx.Err()) + }() + return call } - o.conn.outLck.RLock() - defer o.conn.outLck.RUnlock() done := make(chan *Call, 1) call := &Call{ Err: nil, @@ -107,11 +181,9 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface call.Done <- call close(done) }() - if o.conn.closed { + o.conn.sendMessageAndIfClosed(msg, func() { call.Err = ErrClosed - return call - } - o.conn.out <- msg + }) return call } diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/server_interfaces.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/server_interfaces.go index 091948aef71..01166f0bd2a 100644 --- a/cluster-autoscaler/vendor/github.com/godbus/dbus/server_interfaces.go +++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/server_interfaces.go @@ -87,3 +87,13 @@ type SignalHandler interface { type DBusError interface { DBusError() (string, []interface{}) } + +// SerialGenerator is responsible for serials generation. +// +// Different approaches for the serial generation can be used, +// maintaining a map guarded with a mutex (the standard way) or +// simply increment an atomic counter. 
+type SerialGenerator interface {
+	GetSerial() uint32
+	RetireSerial(serial uint32)
+}
diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_generic.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_generic.go
index 3fad859a6b8..718a1ff0219 100644
--- a/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_generic.go
+++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_generic.go
@@ -11,7 +11,7 @@ var nativeEndian binary.ByteOrder
 
 func detectEndianness() binary.ByteOrder {
 	var x uint32 = 0x01020304
-	if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+	if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
 		return binary.BigEndian
 	}
 	return binary.LittleEndian
diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_nonce_tcp.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_nonce_tcp.go
new file mode 100644
index 00000000000..697739efafb
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_nonce_tcp.go
@@ -0,0 +1,39 @@
+//+build !windows
+
+package dbus
+
+import (
+	"errors"
+	"io/ioutil"
+	"net"
+)
+
+func init() {
+	transports["nonce-tcp"] = newNonceTcpTransport
+}
+
+func newNonceTcpTransport(keys string) (transport, error) {
+	host := getKey(keys, "host")
+	port := getKey(keys, "port")
+	noncefile := getKey(keys, "noncefile")
+	if host == "" || port == "" || noncefile == "" {
+		return nil, errors.New("dbus: unsupported address (must set host, port and noncefile)")
+	}
+	protocol, err := tcpFamily(keys)
+	if err != nil {
+		return nil, err
+	}
+	socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
+	if err != nil {
+		return nil, err
+	}
+	b, err := ioutil.ReadFile(noncefile)
+	if err != nil {
+		return nil, err
+	}
+	_, err = socket.Write(b)
+	if err != nil {
+		return nil, err
+	}
+	return NewConn(socket)
+}
diff --git a/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_unix.go b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_unix.go
index e56d5ca90aa..f000c6b5d45 100644
--- a/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_unix.go
+++ b/cluster-autoscaler/vendor/github.com/godbus/dbus/transport_unix.go
@@ -31,6 +31,7 @@ func (o *oobReader) Read(b []byte) (n int, err error) {
 
 type unixTransport struct {
 	*net.UnixConn
+	rdr        *oobReader
 	hasUnixFDs bool
 }
 
@@ -79,10 +80,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
 	// To be sure that all bytes of out-of-band data are read, we use a special
 	// reader that uses ReadUnix on the underlying connection instead of Read
 	// and gathers the out-of-band data in a buffer.
- rd := &oobReader{conn: t.UnixConn} + if t.rdr == nil { + t.rdr = &oobReader{conn: t.UnixConn} + } else { + t.rdr.oob = nil + } + // read the first 16 bytes (the part of the header that has a constant size), // from which we can figure out the length of the rest of the message - if _, err := io.ReadFull(rd, csheader[:]); err != nil { + if _, err := io.ReadFull(t.rdr, csheader[:]); err != nil { return nil, err } switch csheader[0] { @@ -104,7 +110,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) { // decode headers and look for unix fds headerdata := make([]byte, hlen+4) copy(headerdata, csheader[12:]) - if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil { return nil, err } dec := newDecoder(bytes.NewBuffer(headerdata), order) @@ -122,7 +128,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) { all := make([]byte, 16+hlen+blen) copy(all, csheader[:]) copy(all[16:], headerdata[4:]) - if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + if _, err := io.ReadFull(t.rdr, all[16+hlen:]); err != nil { return nil, err } if unixfds != 0 { @@ -130,7 +136,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) { return nil, errors.New("dbus: got unix fds on unsupported transport") } // read the fds from the OOB data - scms, err := syscall.ParseSocketControlMessage(rd.oob) + scms, err := syscall.ParseSocketControlMessage(t.rdr.oob) if err != nil { return nil, err } @@ -148,11 +154,23 @@ func (t *unixTransport) ReadMessage() (*Message, error) { // substitute the values in the message body (which are indices for the // array receiver via OOB) with the actual values for i, v := range msg.Body { - if j, ok := v.(UnixFDIndex); ok { + switch v.(type) { + case UnixFDIndex: + j := v.(UnixFDIndex) if uint32(j) >= unixfds { return nil, InvalidMessageError("invalid index for unix fd") } msg.Body[i] = UnixFD(fds[j]) + case []UnixFDIndex: + idxArray := v.([]UnixFDIndex) + fdArray := make([]UnixFD, len(idxArray)) + for k, j := range idxArray { + if uint32(j) >= unixfds { + return nil, InvalidMessageError("invalid index for unix fd") + } + fdArray[k] = UnixFD(fds[j]) + } + msg.Body[i] = fdArray } } return msg, nil diff --git a/cluster-autoscaler/vendor/github.com/golang/protobuf/proto/properties.go b/cluster-autoscaler/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c5c..a4b8c0cd3a8 100644 --- a/cluster-autoscaler/vendor/github.com/golang/protobuf/proto/properties.go +++ b/cluster-autoscaler/vendor/github.com/golang/protobuf/proto/properties.go @@ -38,7 +38,6 @@ package proto import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. 
if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) { p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go index c84187f533e..1855e96599e 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go @@ -16,7 +16,6 @@ import ( "bytes" "encoding/json" "net/http" - "time" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" @@ -52,7 +51,7 @@ func (c *Client) BlockVolumeCreate(request *api.BlockVolumeCreateRequest) ( return nil, utils.GetErrorFromResponse(r) } - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return nil, err } @@ -148,7 +147,7 @@ func (c *Client) BlockVolumeDelete(id string) error { return utils.GetErrorFromResponse(r) } - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go index 74d6fe2739c..c8fd50cb793 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go @@ -34,8 +34,9 @@ const ( RETRY_COUNT = 6 // default delay values - MIN_DELAY = 10 - MAX_DELAY = 30 + MIN_DELAY = 10 + MAX_DELAY = 30 + POLL_DELAY = 400 // milliseconds ) type ClientTLSOptions struct { @@ -49,8 +50,10 @@ type ClientTLSOptions struct { type ClientOptions struct { RetryEnabled bool RetryCount int - // control waits between retries + // control waits between retries (seconds) RetryMinDelay, RetryMaxDelay int + // control wait time while polling for responses (milliseconds) + PollDelay int } // Client object @@ -75,6 +78,14 @@ var defaultClientOptions = ClientOptions{ RetryCount: RETRY_COUNT, RetryMinDelay: MIN_DELAY, RetryMaxDelay: MAX_DELAY, + PollDelay: POLL_DELAY, +} + +// DefaultClientOptions returns a ClientOptions type with all the fields +// initialized to the default values used internally by the new-client +// functions. 
+func DefaultClientOptions() ClientOptions { + return defaultClientOptions } // NewClient creates a new client to access a Heketi server @@ -195,6 +206,11 @@ func (c *Client) checkRedirect(req *http.Request, via []*http.Request) error { return c.setToken(req) } +func (c *Client) pollResponse(r *http.Response) (*http.Response, error) { + return c.waitForResponseWithTimer( + r, time.Millisecond*time.Duration(c.opts.PollDelay)) +} + // Wait for the job to finish, waiting waitTime on every loop func (c *Client) waitForResponseWithTimer(r *http.Response, waitTime time.Duration) (*http.Response, error) { diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go index fd856d8b476..32d3c27831e 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go @@ -17,7 +17,6 @@ import ( "encoding/json" "io" "net/http" - "time" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" @@ -54,7 +53,7 @@ func (c *Client) DeviceAdd(request *api.DeviceAddRequest) error { } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } @@ -138,7 +137,7 @@ func (c *Client) DeviceDeleteWithOptions( } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } @@ -184,7 +183,7 @@ func (c *Client) DeviceState(id string, } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } @@ -220,7 +219,7 @@ func (c *Client) DeviceResync(id string) error { } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Millisecond*250) + r, err = c.pollResponse(r) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go index da31379d199..4e66ac7a572 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go @@ -16,7 +16,6 @@ import ( "bytes" "encoding/json" "net/http" - "time" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" @@ -54,7 +53,7 @@ func (c *Client) NodeAdd(request *api.NodeAddRequest) (*api.NodeInfoResponse, er } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Millisecond*250) + r, err = c.pollResponse(r) if err != nil { return nil, err } @@ -131,7 +130,7 @@ func (c *Client) NodeDelete(id string) error { } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Millisecond*250) + r, err = c.pollResponse(r) if err != nil { return err } @@ -175,7 +174,7 @@ func (c *Client) NodeState(id string, request *api.StateRequest) error { } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go index a588b9eca54..f5eab65ddaa 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go +++ 
b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go @@ -16,7 +16,6 @@ import ( "bytes" "encoding/json" "net/http" - "time" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" @@ -151,7 +150,7 @@ func (c *Client) PendingOperationCleanUp( // AND that the rest async framework in heketi needs to be // polled in order to remove things from its map, the traditional // poll server after request behavior is retained here. - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go index 5b5d919ac5f..7092613f8b5 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go @@ -16,7 +16,6 @@ import ( "bytes" "encoding/json" "net/http" - "time" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" @@ -57,7 +56,7 @@ func (c *Client) VolumeCreate(request *api.VolumeCreateRequest) ( } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return nil, err } @@ -111,7 +110,7 @@ func (c *Client) VolumeSetBlockRestriction(id string, request *api.VolumeBlockRe } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return nil, err } @@ -165,7 +164,7 @@ func (c *Client) VolumeExpand(id string, request *api.VolumeExpandRequest) ( } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return nil, err } @@ -277,7 +276,7 @@ func (c *Client) VolumeDelete(id string) error { } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return err } @@ -319,7 +318,7 @@ func (c *Client) VolumeClone(id string, request *api.VolumeCloneRequest) (*api.V } // Wait for response - r, err = c.waitForResponseWithTimer(r, time.Second) + r, err = c.pollResponse(r) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go index c665607cdb7..a1bdf85f821 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go @@ -21,7 +21,7 @@ import ( "regexp" "sort" - "github.com/go-ozzo/ozzo-validation" + validation "github.com/go-ozzo/ozzo-validation" "github.com/go-ozzo/ozzo-validation/is" ) @@ -181,6 +181,8 @@ type DeviceInfo struct { Device Storage StorageSize `json:"storage"` Id string `json:"id"` + Paths []string `json:"paths,omitempty"` + PvUUID string `json:"pv_uuid,omitempty"` } type DeviceInfoResponse struct { diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter.go index 95ae54fbfe4..29b31cf7895 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg 
API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_array.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_array.go index 6188cb4577a..204fe0e0922 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_array.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_object.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_object.go index 1c575767130..b65137114f6 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_object.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() var field string if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", 
`object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b69bd..9303de41e40 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will be i++ soon case '{': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect.go index 4459e203fb8..74974ba74b0 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the 
underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_extension.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf1fed..e27e8d19179 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") if tag == "-" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_map.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_map.go index 547b4421e34..08e9a391251 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_map.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_marshaler.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea50719de9..3e21f375671 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641ac46b..5ad5cc561af 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + 
} var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type 
sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh new file mode 100644 index 00000000000..646934bb404 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -n "$(gofmt -s -l .)" ]; then + echo "Go code is not properly formatted:" + gofmt -s -d -e . + exit 1 +fi diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/.travis.yml b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml similarity index 54% rename from cluster-autoscaler/vendor/github.com/pborman/uuid/.travis.yml rename to cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml index 3deb4a12430..d45dc3c918c 100644 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml @@ -1,10 +1,9 @@ language: go go: + - "1.8" - "1.9" - "1.10" - - "1.11" - - tip script: - - go test -v ./... 
+ - make presubmit diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile index b329e964d41..65314b34b4a 100644 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile +++ b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile @@ -18,3 +18,6 @@ PKG=github.com/mindprince/gonvml build: docker run -v $(shell pwd):/go/src/$(PKG) --workdir=/go/src/$(PKG) golang:1.8 go build cmd/example/example.go +.PHONY: presubmit +presubmit: + ./.travis.gofmt.sh diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go index ae1711d85e1..abefe83eabf 100644 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go +++ b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go @@ -111,6 +111,38 @@ nvmlReturn_t nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power) { return nvmlDeviceGetPowerUsageFunc(device, power); } +nvmlReturn_t (*nvmlDeviceGetTemperatureFunc)(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); +nvmlReturn_t nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp) { + if (nvmlDeviceGetTemperatureFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetTemperatureFunc(device, sensorType, temp); +} + +nvmlReturn_t (*nvmlDeviceGetFanSpeedFunc)(nvmlDevice_t device, unsigned int *speed); +nvmlReturn_t nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed) { + if (nvmlDeviceGetFanSpeedFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetFanSpeedFunc(device, speed); +} + +nvmlReturn_t (*nvmlDeviceGetEncoderUtilizationFunc)(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs); +nvmlReturn_t nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) { + if (nvmlDeviceGetEncoderUtilizationFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetEncoderUtilizationFunc(device, utilization, samplingPeriodUs); +} + +nvmlReturn_t (*nvmlDeviceGetDecoderUtilizationFunc)(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs); +nvmlReturn_t nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) { + if (nvmlDeviceGetDecoderUtilizationFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetDecoderUtilizationFunc(device, utilization, samplingPeriodUs); +} + nvmlReturn_t (*nvmlDeviceGetSamplesFunc)(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples); // Loads the "libnvidia-ml.so.1" shared library. 
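As an illustrative aside: the C entry points added above are resolved via dlsym in the next hunk and surfaced further below as Device methods (Temperature, FanSpeed, EncoderUtilization, DecoderUtilization). A minimal, hedged usage sketch of those accessors follows; it assumes only the exported gonvml API that appears in this patch, and the driver loop and output formatting are illustrative rather than vendored code.

package main

import (
	"fmt"
	"log"

	"github.com/mindprince/gonvml"
)

func main() {
	// Initialize dlopens libnvidia-ml.so.1 and resolves the NVML symbols,
	// including the newly added temperature/fan/encoder/decoder functions.
	if err := gonvml.Initialize(); err != nil {
		log.Fatalf("NVML not available: %v", err)
	}
	defer gonvml.Shutdown()

	count, err := gonvml.DeviceCount()
	if err != nil {
		log.Fatal(err)
	}
	for i := uint(0); i < count; i++ {
		dev, err := gonvml.DeviceHandleByIndex(i)
		if err != nil {
			log.Fatal(err)
		}
		temp, _ := dev.Temperature()          // degrees Celsius
		fan, _ := dev.FanSpeed()              // percent of maximum fan speed
		enc, _, _ := dev.EncoderUtilization() // percent; the second value is the sampling period in microseconds
		dec, _, _ := dev.DecoderUtilization() // percent; same shape as EncoderUtilization
		fmt.Printf("gpu %d: temp=%dC fan=%d%% enc=%d%% dec=%d%%\n", i, temp, fan, enc, dec)
	}
}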
@@ -169,10 +201,26 @@ nvmlReturn_t nvmlInit_dl(void) { if (nvmlDeviceGetPowerUsageFunc == NULL) { return NVML_ERROR_FUNCTION_NOT_FOUND; } + nvmlDeviceGetTemperatureFunc = dlsym(nvmlHandle, "nvmlDeviceGetTemperature"); + if (nvmlDeviceGetTemperatureFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetFanSpeedFunc = dlsym(nvmlHandle, "nvmlDeviceGetFanSpeed"); + if (nvmlDeviceGetFanSpeedFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } nvmlDeviceGetSamplesFunc = dlsym(nvmlHandle, "nvmlDeviceGetSamples"); if (nvmlDeviceGetSamplesFunc == NULL) { return NVML_ERROR_FUNCTION_NOT_FOUND; } + nvmlDeviceGetEncoderUtilizationFunc = dlsym(nvmlHandle, "nvmlDeviceGetEncoderUtilization"); + if (nvmlDeviceGetEncoderUtilizationFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetDecoderUtilizationFunc = dlsym(nvmlHandle, "nvmlDeviceGetDecoderUtilization"); + if (nvmlDeviceGetDecoderUtilizationFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } nvmlReturn_t result = nvmlInitFunc(); if (result != NVML_SUCCESS) { dlclose(nvmlHandle); @@ -384,7 +432,7 @@ func (d Device) MemoryInfo() (uint64, uint64, error) { // UtilizationRates returns the percent of time over the past sample period during which: // utilization.gpu: one or more kernels were executing on the GPU. -// utilizatoin.memory: global (device) memory was being read or written. +// utilization.memory: global (device) memory was being read or written. func (d Device) UtilizationRates() (uint, uint, error) { if C.nvmlHandle == nil { return 0, 0, errLibraryNotLoaded @@ -429,3 +477,48 @@ func (d Device) AverageGPUUtilization(since time.Duration) (uint, error) { r := C.nvmlDeviceGetAverageUsage(d.dev, C.NVML_GPU_UTILIZATION_SAMPLES, lastTs, &n) return uint(n), errorString(r) } + +// Temperature returns the temperature for this GPU in Celsius. +func (d Device) Temperature() (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + var n C.uint + r := C.nvmlDeviceGetTemperature(d.dev, C.NVML_TEMPERATURE_GPU, &n) + return uint(n), errorString(r) +} + +// FanSpeed returns the temperature for this GPU in the percentage of its full +// speed, with 100 being the maximum. +func (d Device) FanSpeed() (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + var n C.uint + r := C.nvmlDeviceGetFanSpeed(d.dev, &n) + return uint(n), errorString(r) +} + +// EncoderUtilization returns the percent of time over the last sample period during which the GPU video encoder was being used. +// The sampling period is variable and is returned in the second return argument in microseconds. +func (d Device) EncoderUtilization() (uint, uint, error) { + if C.nvmlHandle == nil { + return 0, 0, errLibraryNotLoaded + } + var n C.uint + var sp C.uint + r := C.nvmlDeviceGetEncoderUtilization(d.dev, &n, &sp) + return uint(n), uint(sp), errorString(r) +} + +// DecoderUtilization returns the percent of time over the last sample period during which the GPU video decoder was being used. +// The sampling period is variable and is returned in the second return argument in microseconds. 
+func (d Device) DecoderUtilization() (uint, uint, error) { + if C.nvmlHandle == nil { + return 0, 0, errLibraryNotLoaded + } + var n C.uint + var sp C.uint + r := C.nvmlDeviceGetDecoderUtilization(d.dev, &n, &sp) + return uint(n), uint(sp), errorString(r) +} diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go new file mode 100644 index 00000000000..ddbec565077 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go @@ -0,0 +1,115 @@ +// +build !cgo + +package gonvml + +import ( + "errors" + "time" +) + +var errNoCgo = errors.New("this binary is built without CGO, NVML is disabled") + +// Initialize initializes NVML. +// Call this before calling any other methods. +func Initialize() error { + return errNoCgo +} + +// Shutdown shuts down NVML. +// Call this once NVML is no longer being used. +func Shutdown() error { + return errNoCgo +} + +// SystemDriverVersion returns the the driver version on the system. +func SystemDriverVersion() (string, error) { + return "", errNoCgo +} + +// DeviceCount returns the number of nvidia devices on the system. +func DeviceCount() (uint, error) { + return 0, errNoCgo +} + +// Device is the handle for the device. +// This handle is obtained by calling DeviceHandleByIndex(). +type Device struct { +} + +// DeviceHandleByIndex returns the device handle for a particular index. +// The indices range from 0 to DeviceCount()-1. The order in which NVML +// enumerates devices has no guarantees of consistency between reboots. +func DeviceHandleByIndex(idx uint) (Device, error) { + return Device{}, errNoCgo +} + +// MinorNumber returns the minor number for the device. +// The minor number for the device is such that the Nvidia device node +// file for each GPU will have the form /dev/nvidia[minor number]. +func (d Device) MinorNumber() (uint, error) { + return 0, errNoCgo +} + +// UUID returns the globally unique immutable UUID associated with this device. +func (d Device) UUID() (string, error) { + return "", errNoCgo +} + +// Name returns the product name of the device. +func (d Device) Name() (string, error) { + return "", errNoCgo +} + +// MemoryInfo returns the total and used memory (in bytes) of the device. +func (d Device) MemoryInfo() (uint64, uint64, error) { + return 0, 0, errNoCgo +} + +// UtilizationRates returns the percent of time over the past sample period during which: +// utilization.gpu: one or more kernels were executing on the GPU. +// utilizatoin.memory: global (device) memory was being read or written. +func (d Device) UtilizationRates() (uint, uint, error) { + return 0, 0, errNoCgo +} + +// PowerUsage returns the power usage for this GPU and its associated circuitry +// in milliwatts. The reading is accurate to within +/- 5% of current power draw. +func (d Device) PowerUsage() (uint, error) { + return 0, errNoCgo +} + +// AveragePowerUsage returns the power usage for this GPU and its associated circuitry +// in milliwatts averaged over the samples collected in the last `since` duration. +func (d Device) AveragePowerUsage(since time.Duration) (uint, error) { + return 0, errNoCgo +} + +// AverageGPUUtilization returns the utilization.gpu metric (percent of time +// one of more kernels were executing on the GPU) averaged over the samples +// collected in the last `since` duration. 
+func (d Device) AverageGPUUtilization(since time.Duration) (uint, error) { + return 0, errNoCgo +} + +// Temperature returns the temperature for this GPU in Celsius. +func (d Device) Temperature() (uint, error) { + return 0, errNoCgo +} + +// FanSpeed returns the temperature for this GPU in the percentage of its full +// speed, with 100 being the maximum. +func (d Device) FanSpeed() (uint, error) { + return 0, errNoCgo +} + +// EncoderUtilization returns the percent of time over the last sample period during which the GPU video encoder was being used. +// The sampling period is variable and is returned in the second return argument in microseconds. +func (d Device) EncoderUtilization() (uint, uint, error) { + return 0, 0, errNoCgo +} + +// DecoderUtilization returns the percent of time over the last sample period during which the GPU video decoder was being used. +// The sampling period is variable and is returned in the second return argument in microseconds. +func (d Device) DecoderUtilization() (uint, uint, error) { + return 0, 0, errNoCgo +} diff --git a/cluster-autoscaler/vendor/github.com/mrunalp/fileutils/fileutils.go b/cluster-autoscaler/vendor/github.com/mrunalp/fileutils/fileutils.go index b60cb909c5c..5a9818a24ec 100644 --- a/cluster-autoscaler/vendor/github.com/mrunalp/fileutils/fileutils.go +++ b/cluster-autoscaler/vendor/github.com/mrunalp/fileutils/fileutils.go @@ -139,19 +139,16 @@ func CopyDirectory(source string, dest string) error { return nil } - // Copy the file. - if err := CopyFile(path, filepath.Join(dest, relPath)); err != nil { - return err - } - - return nil + return CopyFile(path, filepath.Join(dest, relPath)) }) } +// Gives a number indicating the device driver to be used to access the passed device func major(device uint64) uint64 { return (device >> 8) & 0xfff } +// Gives a number that serves as a flag to the device driver for the passed device func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/LICENSE b/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/LICENSE similarity index 53% rename from cluster-autoscaler/vendor/github.com/pborman/uuid/LICENSE rename to cluster-autoscaler/vendor/github.com/munnerz/goautoneg/LICENSE index 5dc68268d90..bbc7b897cbc 100644 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/LICENSE +++ b/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/LICENSE @@ -1,24 +1,28 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY diff --git a/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/autoneg.go b/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/autoneg.go index 648b38cb654..1dd1cad6466 100644 --- a/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/autoneg.go +++ b/cluster-autoscaler/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -35,9 +35,8 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ + package goautoneg import ( @@ -53,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. 
+type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -76,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } - mrp := strings.Split(part, ";") + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { continue } - token := strings.Trim(sp[0], " ") + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/README.md b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/README.md index 1d7fa04c082..a791ca2d249 100644 --- 
a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/README.md +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/README.md @@ -261,6 +261,7 @@ process := &libcontainer.Process{ Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, + Init: true, } err := container.Run(process) diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go index 7fff0627fa1..debfc1e489e 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go @@ -6,6 +6,8 @@ import ( "fmt" "io/ioutil" "os" + + "github.com/opencontainers/runc/libcontainer/utils" ) // IsEnabled returns true if apparmor is enabled for the host. @@ -19,7 +21,7 @@ func IsEnabled() bool { return false } -func setprocattr(attr, value string) error { +func setProcAttr(attr, value string) error { // Under AppArmor you can only change your own attr, so use /proc/self/ // instead of /proc// like libapparmor does path := fmt.Sprintf("/proc/self/attr/%s", attr) @@ -30,6 +32,10 @@ func setprocattr(attr, value string) error { } defer f.Close() + if err := utils.EnsureProcHandle(f); err != nil { + return err + } + _, err = fmt.Fprintf(f, "%s", value) return err } @@ -37,7 +43,7 @@ func setprocattr(attr, value string) error { // changeOnExec reimplements aa_change_onexec from libapparmor in Go func changeOnExec(name string) error { value := "exec " + name - if err := setprocattr("exec", value); err != nil { + if err := setProcAttr("exec", value); err != nil { return fmt.Errorf("apparmor failed to apply profile: %s", err) } return nil diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go index 7c66f572580..9daef29e480 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go @@ -71,7 +71,11 @@ func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilitie } ambient = append(ambient, v) } - pid, err := capability.NewPid(0) + pid, err := capability.NewPid2(0) + if err != nil { + return nil, err + } + err = pid.Load() if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go index f672ba27377..512fd700104 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go @@ -18,7 +18,7 @@ import ( ) var ( - subsystems = subsystemSet{ + subsystemsLegacy = subsystemSet{ &CpusetGroup{}, &DevicesGroup{}, &MemoryGroup{}, @@ -33,6 +33,14 @@ var ( &FreezerGroup{}, &NameGroup{GroupName: "name=systemd", Join: true}, } + subsystemsUnified = subsystemSet{ + &CpusetGroupV2{}, + &FreezerGroupV2{}, + &CpuGroupV2{}, + &MemoryGroupV2{}, + &IOGroupV2{}, + &PidsGroupV2{}, + } HugePageSizes, _ = cgroups.GetHugePageSize() ) @@ -129,6 +137,13 @@ func isIgnorableError(rootless bool, err error) bool { return errno == unix.EROFS || errno == unix.EPERM 
|| errno == unix.EACCES } +func (m *Manager) getSubsystems() subsystemSet { + if cgroups.IsCgroup2UnifiedMode() { + return subsystemsUnified + } + return subsystemsLegacy +} + func (m *Manager) Apply(pid int) (err error) { if m.Cgroups == nil { return nil @@ -158,7 +173,7 @@ func (m *Manager) Apply(pid int) (err error) { return cgroups.EnterPid(m.Paths, pid) } - for _, sys := range subsystems { + for _, sys := range m.getSubsystems() { // TODO: Apply should, ideally, be reentrant or be broken up into a separate // create and join phase so that the cgroup hierarchy for a container can be // created then join consists of writing the process pids to cgroup.procs @@ -214,7 +229,7 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { defer m.mu.Unlock() stats := cgroups.NewStats() for name, path := range m.Paths { - sys, err := subsystems.Get(name) + sys, err := m.getSubsystems().Get(name) if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) { continue } @@ -226,14 +241,18 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { } func (m *Manager) Set(container *configs.Config) error { + if container.Cgroups == nil { + return nil + } + // If Paths are set, then we are just joining cgroups paths // and there is no need to set any values. - if m.Cgroups.Paths != nil { + if m.Cgroups != nil && m.Cgroups.Paths != nil { return nil } paths := m.GetPaths() - for _, sys := range subsystems { + for _, sys := range m.getSubsystems() { path := paths[sys.Name()] if err := sys.Set(path, container.Cgroups); err != nil { if m.Rootless && sys.Name() == "devices" { @@ -262,11 +281,15 @@ func (m *Manager) Set(container *configs.Config) error { // Freeze toggles the container's freezer cgroup depending on the state // provided func (m *Manager) Freeze(state configs.FreezerState) error { + if m.Cgroups == nil { + return errors.New("cannot toggle freezer: cgroups not configured for container") + } + paths := m.GetPaths() dir := paths["freezer"] prevState := m.Cgroups.Resources.Freezer m.Cgroups.Resources.Freezer = state - freezer, err := subsystems.Get("freezer") + freezer, err := m.getSubsystems().Get("freezer") if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go new file mode 100644 index 00000000000..245071ae504 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go @@ -0,0 +1,92 @@ +// +build linux + +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" +) + +type CpuGroupV2 struct { +} + +func (s *CpuGroupV2) Name() string { + return "cpu" +} + +func (s *CpuGroupV2) Apply(d *cgroupData) error { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + path, err := d.path("cpu") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return s.ApplyDir(path, d.config, d.pid) +} + +func (s *CpuGroupV2) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error { + // This might happen if we have no cpu cgroup mounted. + // Just do nothing and don't fail. 
+ if path == "" { + return nil + } + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + return cgroups.WriteCgroupProc(path, pid) +} + +func (s *CpuGroupV2) Set(path string, cgroup *configs.Cgroup) error { + if cgroup.Resources.CpuWeight != 0 { + if err := writeFile(path, "cpu.weight", strconv.FormatUint(cgroup.Resources.CpuWeight, 10)); err != nil { + return err + } + } + + if cgroup.Resources.CpuMax != "" { + if err := writeFile(path, "cpu.max", cgroup.Resources.CpuMax); err != nil { + return err + } + } + + return nil +} + +func (s *CpuGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("cpu")) +} + +func (s *CpuGroupV2) GetStats(path string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "usage_usec": + stats.CpuStats.CpuUsage.TotalUsage = v * 1000 + + case "user_usec": + stats.CpuStats.CpuUsage.UsageInUsermode = v * 1000 + + case "system_usec": + stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000 + } + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go new file mode 100644 index 00000000000..fa88cfb7d6e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go @@ -0,0 +1,159 @@ +// +build linux + +package fs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" +) + +type CpusetGroupV2 struct { +} + +func (s *CpusetGroupV2) Name() string { + return "cpuset" +} + +func (s *CpusetGroupV2) Apply(d *cgroupData) error { + dir, err := d.path("cpuset") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return s.ApplyDir(dir, d.config, d.pid) +} + +func (s *CpusetGroupV2) Set(path string, cgroup *configs.Cgroup) error { + if cgroup.Resources.CpusetCpus != "" { + if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { + return err + } + } + if cgroup.Resources.CpusetMems != "" { + if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("cpuset")) +} + +func (s *CpusetGroupV2) GetStats(path string, stats *cgroups.Stats) error { + return nil +} + +func (s *CpusetGroupV2) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error { + // This might happen if we have no cpuset cgroup mounted. + // Just do nothing and don't fail. + if dir == "" { + return nil + } + mountInfo, err := ioutil.ReadFile("/proc/self/mountinfo") + if err != nil { + return err + } + root := filepath.Dir(cgroups.GetClosestMountpointAncestor(dir, string(mountInfo))) + // 'ensureParent' start with parent because we don't want to + // explicitly inherit from parent, it could conflict with + // 'cpuset.cpu_exclusive'. 
+ if err := s.ensureParent(filepath.Dir(dir), root); err != nil { + return err + } + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // We didn't inherit cpuset configs from parent, but we have + // to ensure cpuset configs are set before moving task into the + // cgroup. + // The logic is, if user specified cpuset configs, use these + // specified configs, otherwise, inherit from parent. This makes + // cpuset configs work correctly with 'cpuset.cpu_exclusive', and + // keep backward compatibility. + if err := s.ensureCpusAndMems(dir, cgroup); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + return cgroups.WriteCgroupProc(dir, pid) +} + +func (s *CpusetGroupV2) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus.effective")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems.effective")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent makes sure that the parent directory of current is created +// and populated with the proper cpus and mems files copied from +// it's parent. +func (s *CpusetGroupV2) ensureParent(current, root string) error { + parent := filepath.Dir(current) + if libcontainerUtils.CleanPath(parent) == root { + return nil + } + // Avoid infinite recursion. + if parent == current { + return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") + } + if err := s.ensureParent(parent, root); err != nil { + return err + } + if err := os.MkdirAll(current, 0755); err != nil { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are 0 +func (s *CpusetGroupV2) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroupV2) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} + +func (s *CpusetGroupV2) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error { + if err := s.Set(path, cgroup); err != nil { + return err + } + return s.copyIfNeeded(path, filepath.Dir(path)) +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go new file mode 100644 index 00000000000..186de9ab41f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go @@ -0,0 +1,74 @@ +// +build linux + +package fs + +import ( + "fmt" + "strings" + "time" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" +) + +type FreezerGroupV2 struct { +} + +func (s *FreezerGroupV2) Name() string { + return "freezer" +} + +func (s *FreezerGroupV2) Apply(d 
*cgroupData) error { + _, err := d.join("freezer") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *FreezerGroupV2) Set(path string, cgroup *configs.Cgroup) error { + var desiredState string + filename := "cgroup.freeze" + if cgroup.Resources.Freezer == configs.Frozen { + desiredState = "1" + } else { + desiredState = "0" + } + + switch cgroup.Resources.Freezer { + case configs.Frozen, configs.Thawed: + for { + // In case this loop does not exit because it doesn't get the expected + // state, let's write again this state, hoping it's going to be properly + // set this time. Otherwise, this loop could run infinitely, waiting for + // a state change that would never happen. + if err := writeFile(path, filename, desiredState); err != nil { + return err + } + + state, err := readFile(path, filename) + if err != nil { + return err + } + if strings.TrimSpace(state) == desiredState { + break + } + + time.Sleep(1 * time.Millisecond) + } + case configs.Undefined: + return nil + default: + return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer)) + } + + return nil +} + +func (s *FreezerGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("freezer")) +} + +func (s *FreezerGroupV2) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go new file mode 100644 index 00000000000..84b1829451a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go @@ -0,0 +1,191 @@ +// +build linux + +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" +) + +type IOGroupV2 struct { +} + +func (s *IOGroupV2) Name() string { + return "blkio" +} + +func (s *IOGroupV2) Apply(d *cgroupData) error { + _, err := d.join("blkio") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *IOGroupV2) Set(path string, cgroup *configs.Cgroup) error { + cgroupsv2 := cgroups.IsCgroup2UnifiedMode() + + if cgroup.Resources.BlkioWeight != 0 { + filename := "blkio.weight" + if cgroupsv2 { + filename = "io.bfq.weight" + } + if err := writeFile(path, filename, strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { + return err + } + } + + if cgroup.Resources.BlkioLeafWeight != 0 { + if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil { + return err + } + } + for _, wd := range cgroup.Resources.BlkioWeightDevice { + if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil { + return err + } + if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil { + return err + } + } + for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice { + if cgroupsv2 { + if err := writeFile(path, "io.max", td.StringName("rbps")); err != nil { + return err + } + } else { + if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil { + return err + } + } + } + for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice { + if cgroupsv2 { + if err := writeFile(path, "io.max", td.StringName("wbps")); err != nil { + return err + } + } else { + if err := writeFile(path, 
"blkio.throttle.write_bps_device", td.String()); err != nil { + return err + } + } + } + for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice { + if cgroupsv2 { + if err := writeFile(path, "io.max", td.StringName("riops")); err != nil { + return err + } + } else { + if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil { + return err + } + } + } + for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice { + if cgroupsv2 { + if err := writeFile(path, "io.max", td.StringName("wiops")); err != nil { + return err + } + } else { + if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil { + return err + } + } + } + + return nil +} + +func (s *IOGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("blkio")) +} + +func readCgroup2MapFile(path string, name string) (map[string][]string, error) { + ret := map[string][]string{} + p := filepath.Join("/sys/fs/cgroup", path, name) + f, err := os.Open(p) + if err != nil { + if os.IsNotExist(err) { + return ret, nil + } + return nil, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + ret[parts[0]] = parts[1:] + } + if err := scanner.Err(); err != nil { + return nil, err + } + return ret, nil +} + +func (s *IOGroupV2) getCgroupV2Stats(path string, stats *cgroups.Stats) error { + // more details on the io.stat file format: https://www.kernel.org/doc/Documentation/cgroup-v2.txt + var ioServiceBytesRecursive []cgroups.BlkioStatEntry + values, err := readCgroup2MapFile(path, "io.stat") + if err != nil { + return err + } + for k, v := range values { + d := strings.Split(k, ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + for _, item := range v { + d := strings.Split(item, "=") + if len(d) != 2 { + continue + } + op := d[0] + + // Accommodate the cgroup v1 naming + switch op { + case "rbytes": + op = "read" + case "wbytes": + op = "write" + } + + value, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + entry := cgroups.BlkioStatEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + } + stats.BlkioStats = cgroups.BlkioStats{IoServiceBytesRecursive: ioServiceBytesRecursive} + return nil +} + +func (s *IOGroupV2) GetStats(path string, stats *cgroups.Stats) error { + return s.getCgroupV2Stats(path, stats) +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go new file mode 100644 index 00000000000..2ad997bcc29 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go @@ -0,0 +1,164 @@ +// +build linux + +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" +) + +type MemoryGroupV2 struct { +} + +func (s *MemoryGroupV2) Name() string { + return "memory" +} + +func (s *MemoryGroupV2) Apply(d *cgroupData) (err error) { + path, err := d.path("memory") + if err != nil && !cgroups.IsNotFound(err) { + return err + } else if path == "" { + 
return nil + } + if memoryAssigned(d.config) { + if _, err := os.Stat(path); os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + // Only enable kernel memory accouting when this cgroup + // is created by libcontainer, otherwise we might get + // error when people use `cgroupsPath` to join an existed + // cgroup whose kernel memory is not initialized. + if err := EnableKernelMemoryAccounting(path); err != nil { + return err + } + } + } + defer func() { + if err != nil { + os.RemoveAll(path) + } + }() + + // We need to join memory cgroup after set memory limits, because + // kmem.limit_in_bytes can only be set when the cgroup is empty. + _, err = d.join("memory") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func setMemoryAndSwapCgroups(path string, cgroup *configs.Cgroup) error { + if cgroup.Resources.MemorySwap != 0 { + if err := writeFile(path, "memory.swap.max", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { + return err + } + } + if cgroup.Resources.Memory != 0 { + if err := writeFile(path, "memory.max", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { + return err + } + } + return nil +} + +func (s *MemoryGroupV2) Set(path string, cgroup *configs.Cgroup) error { + + if err := setMemoryAndSwapCgroups(path, cgroup); err != nil { + return err + } + + if cgroup.Resources.KernelMemory != 0 { + if err := setKernelMemory(path, cgroup.Resources.KernelMemory); err != nil { + return err + } + } + + if cgroup.Resources.MemoryReservation != 0 { + if err := writeFile(path, "memory.high", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { + return err + } + } + + return nil +} + +func (s *MemoryGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("memory")) +} + +func (s *MemoryGroupV2) GetStats(path string, stats *cgroups.Stats) error { + // Set stats from memory.stat. 
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) + } + stats.MemoryStats.Stats[t] = v + } + stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"] + + memoryUsage, err := getMemoryDataV2(path, "") + if err != nil { + return err + } + stats.MemoryStats.Usage = memoryUsage + swapUsage, err := getMemoryDataV2(path, "swap") + if err != nil { + return err + } + stats.MemoryStats.SwapUsage = swapUsage + + stats.MemoryStats.UseHierarchy = true + return nil +} + +func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { + memoryData := cgroups.MemoryData{} + + moduleName := "memory" + if name != "" { + moduleName = strings.Join([]string{"memory", name}, ".") + } + usage := strings.Join([]string{moduleName, "current"}, ".") + limit := strings.Join([]string{moduleName, "max"}, ".") + + value, err := getCgroupParamUint(path, usage) + if err != nil { + if moduleName != "memory" && os.IsNotExist(err) { + return cgroups.MemoryData{}, nil + } + return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err) + } + memoryData.Usage = value + + value, err = getCgroupParamUint(path, limit) + if err != nil { + if moduleName != "memory" && os.IsNotExist(err) { + return cgroups.MemoryData{}, nil + } + return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err) + } + memoryData.Limit = value + + return memoryData, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go new file mode 100644 index 00000000000..3413a2a0d18 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go @@ -0,0 +1,107 @@ +// +build linux + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/unix" +) + +type PidsGroupV2 struct { +} + +func (s *PidsGroupV2) Name() string { + return "pids" +} + +func (s *PidsGroupV2) Apply(d *cgroupData) error { + _, err := d.join("pids") + if err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *PidsGroupV2) Set(path string, cgroup *configs.Cgroup) error { + if cgroup.Resources.PidsLimit != 0 { + // "max" is the fallback value. 
+ limit := "max" + + if cgroup.Resources.PidsLimit > 0 { + limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10) + } + + if err := writeFile(path, "pids.max", limit); err != nil { + return err + } + } + + return nil +} + +func (s *PidsGroupV2) Remove(d *cgroupData) error { + return removePath(d.path("pids")) +} + +func isNOTSUP(err error) bool { + switch err := err.(type) { + case *os.PathError: + return err.Err == unix.ENOTSUP + default: + return false + } +} + +func (s *PidsGroupV2) GetStats(path string, stats *cgroups.Stats) error { + current, err := getCgroupParamUint(path, "pids.current") + if os.IsNotExist(err) { + // if the controller is not enabled, let's read the list + // PIDs (or threads if cgroup.threads is enabled) + contents, err := ioutil.ReadFile(filepath.Join(path, "cgroup.procs")) + if err != nil && isNOTSUP(err) { + contents, err = ioutil.ReadFile(filepath.Join(path, "cgroup.threads")) + } + if err != nil { + return err + } + pids := make(map[string]string) + for _, i := range strings.Split(string(contents), "\n") { + if i != "" { + pids[i] = i + } + } + stats.PidsStats.Current = uint64(len(pids)) + stats.PidsStats.Limit = 0 + return nil + + } + if err != nil { + return fmt.Errorf("failed to parse pids.current - %s", err) + } + + maxString, err := getCgroupParamString(path, "pids.max") + if err != nil { + return fmt.Errorf("failed to parse pids.max - %s", err) + } + + // Default if pids.max == "max" is 0 -- which represents "no limit". + var max uint64 + if maxString != "max" { + max, err = parseUint(maxString, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max")) + } + } + + stats.PidsStats.Current = current + stats.PidsStats.Limit = max + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go index 5ff0a161504..30922777f53 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "path/filepath" "strconv" "strings" @@ -59,8 +60,12 @@ func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { if err != nil { return 0, err } + trimmed := strings.TrimSpace(string(contents)) + if trimmed == "max" { + return math.MaxUint64, nil + } - res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64) + res, err := parseUint(trimmed, 10, 64) if err != nil { return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName) } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go index 3bf723bf964..f274a49a3c1 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go @@ -14,7 +14,6 @@ import ( "time" systemdDbus "github.com/coreos/go-systemd/dbus" - systemdUtil "github.com/coreos/go-systemd/util" "github.com/godbus/dbus" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" @@ 
-22,7 +21,7 @@ import ( "github.com/sirupsen/logrus" ) -type Manager struct { +type LegacyManager struct { mu sync.Mutex Cgroups *configs.Cgroup Paths map[string]string @@ -50,7 +49,7 @@ func (s subsystemSet) Get(name string) (subsystem, error) { return nil, errSubsystemDoesNotExist } -var subsystems = subsystemSet{ +var legacySubsystems = subsystemSet{ &fs.CpusetGroup{}, &fs.DevicesGroup{}, &fs.MemoryGroup{}, @@ -72,11 +71,8 @@ const ( ) var ( - connLock sync.Mutex - theConn *systemdDbus.Conn - hasStartTransientUnit bool - hasStartTransientSliceUnit bool - hasDelegateSlice bool + connLock sync.Mutex + theConn *systemdDbus.Conn ) func newProp(name string, units interface{}) systemdDbus.Property { @@ -86,8 +82,23 @@ func newProp(name string, units interface{}) systemdDbus.Property { } } +// NOTE: This function comes from package github.com/coreos/go-systemd/util +// It was borrowed here to avoid a dependency on cgo. +// +// IsRunningSystemd checks whether the host was booted with systemd as its init +// system. This functions similarly to systemd's `sd_booted(3)`: internally, it +// checks whether /run/systemd/system/ exists and is a directory. +// http://www.freedesktop.org/software/systemd/man/sd_booted.html +func isRunningSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} + func UseSystemd() bool { - if !systemdUtil.IsRunningSystemd() { + if !isRunningSystemd() { return false } @@ -100,82 +111,31 @@ func UseSystemd() bool { if err != nil { return false } - - // Assume we have StartTransientUnit - hasStartTransientUnit = true - - // But if we get UnknownMethod error we don't - if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil { - if dbusError, ok := err.(dbus.Error); ok { - if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { - hasStartTransientUnit = false - return hasStartTransientUnit - } - } - } - - // Assume we have the ability to start a transient unit as a slice - // This was broken until systemd v229, but has been back-ported on RHEL environments >= 219 - // For details, see: https://bugzilla.redhat.com/show_bug.cgi?id=1370299 - hasStartTransientSliceUnit = true - - // To ensure simple clean-up, we create a slice off the root with no hierarchy - slice := fmt.Sprintf("libcontainer_%d_systemd_test_default.slice", os.Getpid()) - if _, err := theConn.StartTransientUnit(slice, "replace", nil, nil); err != nil { - if _, ok := err.(dbus.Error); ok { - hasStartTransientSliceUnit = false - } - } - - for i := 0; i <= testSliceWait; i++ { - if _, err := theConn.StopUnit(slice, "replace", nil); err != nil { - if dbusError, ok := err.(dbus.Error); ok { - if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") { - hasStartTransientSliceUnit = false - break - } - } - } else { - break - } - time.Sleep(time.Millisecond) - } - - // Not critical because of the stop unit logic above. - theConn.StopUnit(slice, "replace", nil) - - // Assume StartTransientUnit on a slice allows Delegate - hasDelegateSlice = true - dlSlice := newProp("Delegate", true) - if _, err := theConn.StartTransientUnit(slice, "replace", []systemdDbus.Property{dlSlice}, nil); err != nil { - if dbusError, ok := err.(dbus.Error); ok { - // Starting with systemd v237, Delegate is not even a property of slices anymore, - // so the D-Bus call fails with "InvalidArgs" error. 
- if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") { - hasDelegateSlice = false - } - } - } - - // Not critical because of the stop unit logic above. - theConn.StopUnit(slice, "replace", nil) } - return hasStartTransientUnit + return true } func NewSystemdCgroupsManager() (func(config *configs.Cgroup, paths map[string]string) cgroups.Manager, error) { - if !systemdUtil.IsRunningSystemd() { + if !isRunningSystemd() { return nil, fmt.Errorf("systemd not running on this host, can't use systemd as a cgroups.Manager") } + if cgroups.IsCgroup2UnifiedMode() { + return func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { + return &UnifiedManager{ + Cgroups: config, + Paths: paths, + } + }, nil + } return func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &Manager{ + return &LegacyManager{ Cgroups: config, Paths: paths, } }, nil } -func (m *Manager) Apply(pid int) error { +func (m *LegacyManager) Apply(pid int) error { var ( c = m.Cgroups unitName = getUnitName(c) @@ -208,10 +168,6 @@ func (m *Manager) Apply(pid int) error { // if we create a slice, the parent is defined via a Wants= if strings.HasSuffix(unitName, ".slice") { - // This was broken until systemd v229, but has been back-ported on RHEL environments >= 219 - if !hasStartTransientSliceUnit { - return fmt.Errorf("systemd version does not support ability to start a slice as transient unit") - } properties = append(properties, systemdDbus.PropWants(slice)) } else { // otherwise, we use Slice= @@ -224,12 +180,7 @@ func (m *Manager) Apply(pid int) error { } // Check if we can delegate. This is only supported on systemd versions 218 and above. - if strings.HasSuffix(unitName, ".slice") { - if hasDelegateSlice { - // systemd 237 and above no longer allows delegation on a slice - properties = append(properties, newProp("Delegate", true)) - } - } else { + if !strings.HasSuffix(unitName, ".slice") { // Assume scopes always support delegation. 
properties = append(properties, newProp("Delegate", true)) } @@ -310,7 +261,7 @@ func (m *Manager) Apply(pid int) error { } paths := make(map[string]string) - for _, s := range subsystems { + for _, s := range legacySubsystems { subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name()) if err != nil { // Don't fail if a cgroup hierarchy was not found, just skip this subsystem @@ -325,7 +276,7 @@ func (m *Manager) Apply(pid int) error { return nil } -func (m *Manager) Destroy() error { +func (m *LegacyManager) Destroy() error { if m.Cgroups.Paths != nil { return nil } @@ -339,7 +290,7 @@ func (m *Manager) Destroy() error { return nil } -func (m *Manager) GetPaths() map[string]string { +func (m *LegacyManager) GetPaths() map[string]string { m.mu.Lock() paths := m.Paths m.mu.Unlock() @@ -351,6 +302,7 @@ func join(c *configs.Cgroup, subsystem string, pid int) (string, error) { if err != nil { return "", err } + if err := os.MkdirAll(path, 0755); err != nil { return "", err } @@ -361,7 +313,7 @@ func join(c *configs.Cgroup, subsystem string, pid int) (string, error) { } func joinCgroups(c *configs.Cgroup, pid int) error { - for _, sys := range subsystems { + for _, sys := range legacySubsystems { name := sys.Name() switch name { case "name=systemd": @@ -456,14 +408,14 @@ func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) { return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil } -func (m *Manager) Freeze(state configs.FreezerState) error { +func (m *LegacyManager) Freeze(state configs.FreezerState) error { path, err := getSubsystemPath(m.Cgroups, "freezer") if err != nil { return err } prevState := m.Cgroups.Resources.Freezer m.Cgroups.Resources.Freezer = state - freezer, err := subsystems.Get("freezer") + freezer, err := legacySubsystems.Get("freezer") if err != nil { return err } @@ -475,7 +427,7 @@ func (m *Manager) Freeze(state configs.FreezerState) error { return nil } -func (m *Manager) GetPids() ([]int, error) { +func (m *LegacyManager) GetPids() ([]int, error) { path, err := getSubsystemPath(m.Cgroups, "devices") if err != nil { return nil, err @@ -483,7 +435,7 @@ func (m *Manager) GetPids() ([]int, error) { return cgroups.GetPids(path) } -func (m *Manager) GetAllPids() ([]int, error) { +func (m *LegacyManager) GetAllPids() ([]int, error) { path, err := getSubsystemPath(m.Cgroups, "devices") if err != nil { return nil, err @@ -491,12 +443,12 @@ func (m *Manager) GetAllPids() ([]int, error) { return cgroups.GetAllPids(path) } -func (m *Manager) GetStats() (*cgroups.Stats, error) { +func (m *LegacyManager) GetStats() (*cgroups.Stats, error) { m.mu.Lock() defer m.mu.Unlock() stats := cgroups.NewStats() for name, path := range m.Paths { - sys, err := subsystems.Get(name) + sys, err := legacySubsystems.Get(name) if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) { continue } @@ -508,13 +460,13 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { return stats, nil } -func (m *Manager) Set(container *configs.Config) error { +func (m *LegacyManager) Set(container *configs.Config) error { // If Paths are set, then we are just joining cgroups paths // and there is no need to set any values. if m.Cgroups.Paths != nil { return nil } - for _, sys := range subsystems { + for _, sys := range legacySubsystems { // Get the subsystem path, but don't error out for not found cgroups. 
path, err := getSubsystemPath(container.Cgroups, sys.Name()) if err != nil && !cgroups.IsNotFound(err) { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go new file mode 100644 index 00000000000..d394e3a5a8f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go @@ -0,0 +1,329 @@ +// +build linux,!static_build + +package systemd + +import ( + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strings" + "sync" + "time" + + systemdDbus "github.com/coreos/go-systemd/dbus" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/sirupsen/logrus" +) + +type UnifiedManager struct { + mu sync.Mutex + Cgroups *configs.Cgroup + Paths map[string]string +} + +var unifiedSubsystems = subsystemSet{ + &fs.CpusetGroupV2{}, + &fs.FreezerGroupV2{}, + &fs.CpuGroupV2{}, + &fs.MemoryGroupV2{}, + &fs.IOGroupV2{}, + &fs.PidsGroupV2{}, +} + +func (m *UnifiedManager) Apply(pid int) error { + var ( + c = m.Cgroups + unitName = getUnitName(c) + slice = "system.slice" + properties []systemdDbus.Property + ) + + if c.Paths != nil { + paths := make(map[string]string) + for name, path := range c.Paths { + _, err := getSubsystemPath(m.Cgroups, name) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + return err + } + paths[name] = path + } + m.Paths = paths + return cgroups.EnterPid(m.Paths, pid) + } + + if c.Parent != "" { + slice = c.Parent + } + + properties = append(properties, systemdDbus.PropDescription("libcontainer container "+c.Name)) + + // if we create a slice, the parent is defined via a Wants= + if strings.HasSuffix(unitName, ".slice") { + properties = append(properties, systemdDbus.PropWants(slice)) + } else { + // otherwise, we use Slice= + properties = append(properties, systemdDbus.PropSlice(slice)) + } + + // only add pid if its valid, -1 is used w/ general slice creation. + if pid != -1 { + properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) + } + + // Check if we can delegate. This is only supported on systemd versions 218 and above. + if !strings.HasSuffix(unitName, ".slice") { + // Assume scopes always support delegation. + properties = append(properties, newProp("Delegate", true)) + } + + // Always enable accounting, this gets us the same behaviour as the fs implementation, + // plus the kernel has some problems with joining the memory cgroup at a later time. + properties = append(properties, + newProp("MemoryAccounting", true), + newProp("CPUAccounting", true), + newProp("BlockIOAccounting", true)) + + // Assume DefaultDependencies= will always work (the check for it was previously broken.) + properties = append(properties, + newProp("DefaultDependencies", false)) + + if c.Resources.Memory != 0 { + properties = append(properties, + newProp("MemoryLimit", uint64(c.Resources.Memory))) + } + + if c.Resources.CpuShares != 0 { + properties = append(properties, + newProp("CPUShares", c.Resources.CpuShares)) + } + + // cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd. 
+ if c.Resources.CpuQuota != 0 && c.Resources.CpuPeriod != 0 { + // corresponds to USEC_INFINITY in systemd + // if USEC_INFINITY is provided, CPUQuota is left unbound by systemd + // always setting a property value ensures we can apply a quota and remove it later + cpuQuotaPerSecUSec := uint64(math.MaxUint64) + if c.Resources.CpuQuota > 0 { + // systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to CPUQuota + // (integer percentage of CPU) internally. This means that if a fractional percent of + // CPU is indicated by Resources.CpuQuota, we need to round up to the nearest + // 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect. + cpuQuotaPerSecUSec = uint64(c.Resources.CpuQuota*1000000) / c.Resources.CpuPeriod + if cpuQuotaPerSecUSec%10000 != 0 { + cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000 + } + } + properties = append(properties, + newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec)) + } + + if c.Resources.BlkioWeight != 0 { + properties = append(properties, + newProp("BlockIOWeight", uint64(c.Resources.BlkioWeight))) + } + + if c.Resources.PidsLimit > 0 { + properties = append(properties, + newProp("TasksAccounting", true), + newProp("TasksMax", uint64(c.Resources.PidsLimit))) + } + + // We have to set kernel memory here, as we can't change it once + // processes have been attached to the cgroup. + if c.Resources.KernelMemory != 0 { + if err := setKernelMemory(c); err != nil { + return err + } + } + + statusChan := make(chan string, 1) + if _, err := theConn.StartTransientUnit(unitName, "replace", properties, statusChan); err == nil { + select { + case <-statusChan: + case <-time.After(time.Second): + logrus.Warnf("Timed out while waiting for StartTransientUnit(%s) completion signal from dbus. 
Continuing...", unitName) + } + } else if !isUnitExists(err) { + return err + } + + if err := joinCgroupsV2(c, pid); err != nil { + return err + } + + paths := make(map[string]string) + for _, s := range unifiedSubsystems { + subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name()) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + return err + } + paths[s.Name()] = subsystemPath + } + m.Paths = paths + return nil +} + +func (m *UnifiedManager) Destroy() error { + if m.Cgroups.Paths != nil { + return nil + } + m.mu.Lock() + defer m.mu.Unlock() + theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil) + if err := cgroups.RemovePaths(m.Paths); err != nil { + return err + } + m.Paths = make(map[string]string) + return nil +} + +func (m *UnifiedManager) GetPaths() map[string]string { + m.mu.Lock() + paths := m.Paths + m.mu.Unlock() + return paths +} + +func createCgroupsv2Path(path string) (Err error) { + content, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") + if err != nil { + return err + } + if !filepath.HasPrefix(path, "/sys/fs/cgroup") { + return fmt.Errorf("invalid cgroup path %s", path) + } + + res := "" + for i, c := range strings.Split(strings.TrimSpace(string(content)), " ") { + if i == 0 { + res = fmt.Sprintf("+%s", c) + } else { + res = res + fmt.Sprintf(" +%s", c) + } + } + resByte := []byte(res) + + current := "/sys/fs" + elements := strings.Split(path, "/") + for i, e := range elements[3:] { + current = filepath.Join(current, e) + if i > 0 { + if err := os.Mkdir(current, 0755); err != nil { + if !os.IsExist(err) { + return err + } + } else { + // If the directory was created, be sure it is not left around on errors. + defer func() { + if Err != nil { + os.Remove(current) + } + }() + } + } + if i < len(elements[3:])-1 { + if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), resByte, 0755); err != nil { + return err + } + } + } + return nil +} + +func joinCgroupsV2(c *configs.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "memory") + if err != nil { + return err + } + return createCgroupsv2Path(path) +} + +func (m *UnifiedManager) Freeze(state configs.FreezerState) error { + path, err := getSubsystemPath(m.Cgroups, "freezer") + if err != nil { + return err + } + prevState := m.Cgroups.Resources.Freezer + m.Cgroups.Resources.Freezer = state + freezer, err := unifiedSubsystems.Get("freezer") + if err != nil { + return err + } + err = freezer.Set(path, m.Cgroups) + if err != nil { + m.Cgroups.Resources.Freezer = prevState + return err + } + return nil +} + +func (m *UnifiedManager) GetPids() ([]int, error) { + path, err := getSubsystemPath(m.Cgroups, "devices") + if err != nil { + return nil, err + } + return cgroups.GetPids(path) +} + +func (m *UnifiedManager) GetAllPids() ([]int, error) { + path, err := getSubsystemPath(m.Cgroups, "devices") + if err != nil { + return nil, err + } + return cgroups.GetAllPids(path) +} + +func (m *UnifiedManager) GetStats() (*cgroups.Stats, error) { + m.mu.Lock() + defer m.mu.Unlock() + stats := cgroups.NewStats() + for name, path := range m.Paths { + sys, err := unifiedSubsystems.Get(name) + if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) { + continue + } + if err := sys.GetStats(path, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +func (m *UnifiedManager) Set(container *configs.Config) error { + // If Paths are set, then we are just joining cgroups paths + // 
and there is no need to set any values. + if m.Cgroups.Paths != nil { + return nil + } + for _, sys := range unifiedSubsystems { + // Get the subsystem path, but don't error out for not found cgroups. + path, err := getSubsystemPath(container.Cgroups, sys.Name()) + if err != nil && !cgroups.IsNotFound(err) { + return err + } + + if err := sys.Set(path, container.Cgroups); err != nil { + return err + } + } + + if m.Paths["cpu"] != "" { + if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil { + return err + } + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index ec79ae76723..60790f83b4e 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -11,6 +11,8 @@ import ( "path/filepath" "strconv" "strings" + "sync" + "syscall" "time" units "github.com/docker/go-units" @@ -22,6 +24,11 @@ const ( CgroupProcesses = "cgroup.procs" ) +var ( + isUnifiedOnce sync.Once + isUnified bool +) + // HugePageSizeUnitList is a list of the units used by the linux kernel when // naming the HugePage control files. // https://www.kernel.org/doc/Documentation/cgroup-v1/hugetlb.txt @@ -29,6 +36,18 @@ const ( // depends on https://github.com/docker/go-units/commit/a09cd47f892041a4fac473133d181f5aea6fa393 var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"} +// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. +func IsCgroup2UnifiedMode() bool { + isUnifiedOnce.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + panic("cannot statfs cgroup root") + } + isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC + }) + return isUnified +} + // https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) @@ -49,6 +68,10 @@ func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, } defer f.Close() + if IsCgroup2UnifiedMode() { + subsystem = "" + } + return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem) } @@ -57,12 +80,12 @@ func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsyst for scanner.Scan() { txt := scanner.Text() fields := strings.Fields(txt) - if len(fields) < 5 { + if len(fields) < 9 { continue } if strings.HasPrefix(fields[4], cgroupPath) { for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if opt == subsystem { + if (subsystem == "" && fields[9] == "cgroup2") || opt == subsystem { return fields[4], fields[3], nil } } @@ -76,6 +99,19 @@ func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsyst } func isSubsystemAvailable(subsystem string) bool { + if IsCgroup2UnifiedMode() { + controllers, err := GetAllSubsystems() + if err != nil { + return false + } + for _, c := range controllers { + if c == subsystem { + return true + } + } + return false + } + cgroups, err := ParseCgroupFile("/proc/self/cgroup") if err != nil { return false @@ -120,7 +156,7 @@ func FindCgroupMountpointDir() (string, error) { return "", fmt.Errorf("Found no fields post '-' in %q", text) } - if postSeparatorFields[0] == "cgroup" { + if postSeparatorFields[0] == "cgroup" || 
postSeparatorFields[0] == "cgroup2" { // Check that the mount is properly formatted. if numPostFields < 3 { return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) @@ -193,6 +229,19 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, // GetCgroupMounts returns the mounts for the cgroup subsystems. // all indicates whether to return just the first instance or all the mounts. func GetCgroupMounts(all bool) ([]Mount, error) { + if IsCgroup2UnifiedMode() { + availableControllers, err := GetAllSubsystems() + if err != nil { + return nil, err + } + m := Mount{ + Mountpoint: "/sys/fs/cgroup", + Root: "/sys/fs/cgroup", + Subsystems: availableControllers, + } + return []Mount{m}, nil + } + f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err @@ -356,6 +405,9 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) { } func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { + if IsCgroup2UnifiedMode() { + return "/", nil + } if p, ok := cgroups[subsystem]; ok { return p, nil diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go index e0f3ca16533..fa195bf90f8 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go @@ -59,3 +59,8 @@ func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice { func (td *ThrottleDevice) String() string { return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate) } + +// StringName formats the struct to be writable to the cgroup specific file +func (td *ThrottleDevice) StringName(name string) string { + return fmt.Sprintf("%d:%d %s=%d", td.Major, td.Minor, name, td.Rate) +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index e15a662f522..58ed19c9e78 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -119,4 +119,12 @@ type Resources struct { // Set class identifier for container's network packets NetClsClassid uint32 `json:"net_cls_classid_u"` + + // Used on cgroups v2: + + // CpuWeight sets a proportional bandwidth limit. + CpuWeight uint64 `json:"cpu_weight"` + + // CpuMax sets she maximum bandwidth limit (format: max period). 
+ CpuMax string `json:"cpu_max"` } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go similarity index 89% rename from cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go rename to cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go index d74847b0db8..c0c23d70004 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux + package configs // TODO Windows: This can ultimately be entirely factored out on Windows as diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 7728522fef6..24989e9f534 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -44,6 +44,7 @@ const ( Trap Allow Trace + Log ) // Operator is a comparison operator to be used when matching syscall arguments in Seccomp diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index d6c4ebdaa10..6ff4d96a5f5 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -19,7 +19,7 @@ import ( "syscall" // only for SysProcAttr and Signal "time" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/intelrdt" @@ -1176,7 +1176,7 @@ func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error { if err != nil { return err } - if err := checkMountDestination(c.config.Rootfs, dest); err != nil { + if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil { return err } m.Destination = dest @@ -1814,7 +1814,14 @@ func (c *linuxContainer) isPaused() (bool, error) { // A container doesn't have a freezer cgroup return false, nil } - data, err := ioutil.ReadFile(filepath.Join(fcg, "freezer.state")) + pausedState := "FROZEN" + filename := "freezer.state" + if cgroups.IsCgroup2UnifiedMode() { + filename = "cgroup.freeze" + pausedState = "1" + } + + data, err := ioutil.ReadFile(filepath.Join(fcg, filename)) if err != nil { // If freezer cgroup is not mounted, the container would just be not paused. 
if os.IsNotExist(err) { @@ -1822,7 +1829,7 @@ func (c *linuxContainer) isPaused() (bool, error) { } return false, newSystemErrorWithCause(err, "checking if container is paused") } - return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil + return bytes.Equal(bytes.TrimSpace(data), []byte(pausedState)), nil } func (c *linuxContainer) currentState() (*State, error) { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index f13b226e444..10888b499be 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/mrunalp/fileutils" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" @@ -197,7 +197,7 @@ func prepareBindMount(m *configs.Mount, rootfs string) error { if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } - if err := checkMountDestination(rootfs, dest); err != nil { + if err := checkProcMount(rootfs, dest, m.Source); err != nil { return err } // update the mount with the correct dest after symlinks are resolved. @@ -209,6 +209,80 @@ func prepareBindMount(m *configs.Mount, rootfs string) error { return nil } +func mountCgroupV1(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error { + binds, err := getCgroupMounts(m) + if err != nil { + return err + } + var merged []string + for _, b := range binds { + ss := filepath.Base(b.Destination) + if strings.Contains(ss, ",") { + merged = append(merged, ss) + } + } + tmpfs := &configs.Mount{ + Source: "tmpfs", + Device: "tmpfs", + Destination: m.Destination, + Flags: defaultMountFlags, + Data: "mode=755", + PropagationFlags: m.PropagationFlags, + } + if err := mountToRootfs(tmpfs, rootfs, mountLabel, enableCgroupns); err != nil { + return err + } + for _, b := range binds { + if enableCgroupns { + subsystemPath := filepath.Join(rootfs, b.Destination) + if err := os.MkdirAll(subsystemPath, 0755); err != nil { + return err + } + flags := defaultMountFlags + if m.Flags&unix.MS_RDONLY != 0 { + flags = flags | unix.MS_RDONLY + } + cgroupmount := &configs.Mount{ + Source: "cgroup", + Device: "cgroup", + Destination: subsystemPath, + Flags: flags, + Data: filepath.Base(subsystemPath), + } + if err := mountNewCgroup(cgroupmount); err != nil { + return err + } + } else { + if err := mountToRootfs(b, rootfs, mountLabel, enableCgroupns); err != nil { + return err + } + } + } + for _, mc := range merged { + for _, ss := range strings.Split(mc, ",") { + // symlink(2) is very dumb, it will just shove the path into + // the link and doesn't do any checks or relative path + // conversion. Also, don't error out if the cgroup already exists. 
+ if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) { + return err + } + } + } + return nil +} + +func mountCgroupV2(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error { + cgroupPath, err := securejoin.SecureJoin(rootfs, m.Destination) + if err != nil { + return err + } + if err := os.MkdirAll(cgroupPath, 0755); err != nil { + return err + } + + return unix.Mount(m.Source, cgroupPath, "cgroup2", uintptr(m.Flags), m.Data) +} + func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error { var ( dest = m.Destination @@ -309,62 +383,14 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns b } } case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - var merged []string - for _, b := range binds { - ss := filepath.Base(b.Destination) - if strings.Contains(ss, ",") { - merged = append(merged, ss) - } - } - tmpfs := &configs.Mount{ - Source: "tmpfs", - Device: "tmpfs", - Destination: m.Destination, - Flags: defaultMountFlags, - Data: "mode=755", - PropagationFlags: m.PropagationFlags, - } - if err := mountToRootfs(tmpfs, rootfs, mountLabel, enableCgroupns); err != nil { - return err - } - for _, b := range binds { - if enableCgroupns { - subsystemPath := filepath.Join(rootfs, b.Destination) - if err := os.MkdirAll(subsystemPath, 0755); err != nil { - return err - } - flags := defaultMountFlags - if m.Flags&unix.MS_RDONLY != 0 { - flags = flags | unix.MS_RDONLY - } - cgroupmount := &configs.Mount{ - Source: "cgroup", - Device: "cgroup", - Destination: subsystemPath, - Flags: flags, - Data: filepath.Base(subsystemPath), - } - if err := mountNewCgroup(cgroupmount); err != nil { - return err - } - } else { - if err := mountToRootfs(b, rootfs, mountLabel, enableCgroupns); err != nil { - return err - } + if cgroups.IsCgroup2UnifiedMode() { + if err := mountCgroupV2(m, rootfs, mountLabel, enableCgroupns); err != nil { + return err } - } - for _, mc := range merged { - for _, ss := range strings.Split(mc, ",") { - // symlink(2) is very dumb, it will just shove the path into - // the link and doesn't do any checks or relative path - // conversion. Also, don't error out if the cgroup already exists. - if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) { - return err - } + } else { + + if err := mountCgroupV1(m, rootfs, mountLabel, enableCgroupns); err != nil { + return err } } if m.Flags&unix.MS_RDONLY != 0 { @@ -388,7 +414,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns b if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } - if err := checkMountDestination(rootfs, dest); err != nil { + if err := checkProcMount(rootfs, dest, m.Source); err != nil { return err } // update the mount with the correct dest after symlinks are resolved. @@ -435,12 +461,12 @@ func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { return binds, nil } -// checkMountDestination checks to ensure that the mount destination is not over the top of /proc. +// checkProcMount checks to ensure that the mount destination is not over the top of /proc. // dest is required to be an abs path and have any symlinks resolved before calling this function. -func checkMountDestination(rootfs, dest string) error { - invalidDestinations := []string{ - "/proc", - } +// +// if source is nil, don't stat the filesystem. This is used for restore of a checkpoint. 
+func checkProcMount(rootfs, dest, source string) error { + const procPath = "/proc" // White list, it should be sub directories of invalid destinations validDestinations := []string{ // These entries can be bind mounted by files emulated by fuse, @@ -463,16 +489,40 @@ func checkMountDestination(rootfs, dest string) error { return nil } } - for _, invalid := range invalidDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) + path, err := filepath.Rel(filepath.Join(rootfs, procPath), dest) + if err != nil { + return err + } + // pass if the mount path is located outside of /proc + if strings.HasPrefix(path, "..") { + return nil + } + if path == "." { + // an empty source is pasted on restore + if source == "" { + return nil + } + // only allow a mount on-top of proc if it's source is "proc" + isproc, err := isProc(source) if err != nil { return err } - if path != "." && !strings.HasPrefix(path, "..") { - return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) + // pass if the mount is happening on top of /proc and the source of + // the mount is a proc filesystem + if isproc { + return nil } + return fmt.Errorf("%q cannot be mounted because it is not of type proc", dest) } - return nil + return fmt.Errorf("%q cannot be mounted because it is inside /proc", dest) +} + +func isProc(path string) (bool, error) { + var s unix.Statfs_t + if err := unix.Statfs(path, &s); err != nil { + return false, err + } + return s.Type == unix.PROC_SUPER_MAGIC, nil } func setupDevSymlinks(rootfs string) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go index ded5a6bbc8b..c3212279875 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go @@ -22,6 +22,7 @@ var actions = map[string]configs.Action{ "SCMP_ACT_TRAP": configs.Trap, "SCMP_ACT_ALLOW": configs.Allow, "SCMP_ACT_TRACE": configs.Trace, + "SCMP_ACT_LOG": configs.Log, } var archs = map[string]string{ diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go index d99f3fe640c..1b7a07118fe 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go @@ -19,6 +19,7 @@ var ( actTrap = libseccomp.ActTrap actKill = libseccomp.ActKill actTrace = libseccomp.ActTrace.SetReturnCode(int16(unix.EPERM)) + actLog = libseccomp.ActLog actErrno = libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM)) ) @@ -112,6 +113,8 @@ func getAction(act configs.Action) (libseccomp.ScmpAction, error) { return actAllow, nil case configs.Trace: return actTrace, nil + case configs.Log: + return actLog, nil default: return libseccomp.ActInvalid, fmt.Errorf("invalid action, cannot use in rule") } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index 11c3faafbf0..e05e30adc3b 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ 
b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,5 +1,5 @@ // +build linux -// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x package system diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go index c96088988a6..1576f2d4ab6 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go @@ -3,33 +3,57 @@ package utils import ( - "io/ioutil" + "fmt" "os" "strconv" "golang.org/x/sys/unix" ) +// EnsureProcHandle returns whether or not the given file handle is on procfs. +func EnsureProcHandle(fh *os.File) error { + var buf unix.Statfs_t + if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil { + return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err) + } + if buf.Type != unix.PROC_SUPER_MAGIC { + return fmt.Errorf("%s is not on procfs", fh.Name()) + } + return nil +} + +// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for +// the process (except for those below the given fd value). func CloseExecFrom(minFd int) error { - fdList, err := ioutil.ReadDir("/proc/self/fd") + fdDir, err := os.Open("/proc/self/fd") + if err != nil { + return err + } + defer fdDir.Close() + + if err := EnsureProcHandle(fdDir); err != nil { + return err + } + + fdList, err := fdDir.Readdirnames(-1) if err != nil { return err } - for _, fi := range fdList { - fd, err := strconv.Atoi(fi.Name()) + for _, fdStr := range fdList { + fd, err := strconv.Atoi(fdStr) + // Ignore non-numeric file names. if err != nil { - // ignore non-numeric file names continue } - + // Ignore descriptors lower than our specified minimum. if fd < minFd { - // ignore descriptors lower than our specified minimum continue } - - // intentionally ignore errors from unix.CloseOnExec + // Intentionally ignore errors from unix.CloseOnExec -- the cases where + // this might fail are basically file descriptors that have already + // been closed (including and especially the one that was created when + // ioutil.ReadDir did the "opendir" syscall). 
unix.CloseOnExec(fd) - // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) } return nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go index 1eb9a6bf252..2730fcf4a9a 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go @@ -13,11 +13,12 @@ import ( // Valid Label Options var validOptions = map[string]bool{ - "disable": true, - "type": true, - "user": true, - "role": true, - "level": true, + "disable": true, + "type": true, + "filetype": true, + "user": true, + "role": true, + "level": true, } var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be used together") @@ -51,13 +52,16 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) { return "", mountLabel, nil } if i := strings.Index(opt, ":"); i == -1 { - return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type' followed by ':' and a value", opt) + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) } con := strings.SplitN(opt, ":", 2) if !validOptions[con[0]] { - return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type'", con[0]) + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) } + if con[0] == "filetype" { + mcon["type"] = con[1] + } pcon[con[0]] = con[1] if con[0] == "level" || con[0] == "user" { mcon[con[0]] = con[1] diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index d7786c33c19..8cdf1b054ac 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -18,6 +18,8 @@ import ( "strings" "sync" "syscall" + + "golang.org/x/sys/unix" ) const ( @@ -252,6 +254,12 @@ func getSELinuxPolicyRoot() string { return filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) } +func isProcHandle(fh *os.File) (bool, error) { + var buf unix.Statfs_t + err := unix.Fstatfs(int(fh.Fd()), &buf) + return buf.Type == unix.PROC_SUPER_MAGIC, err +} + func readCon(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath @@ -263,6 +271,12 @@ func readCon(fpath string) (string, error) { } defer in.Close() + if ok, err := isProcHandle(in); err != nil { + return "", err + } else if !ok { + return "", fmt.Errorf("%s not on procfs", fpath) + } + var retval string if _, err := fmt.Fscanf(in, "%s", &retval); err != nil { return "", err @@ -345,6 +359,12 @@ func writeCon(fpath string, val string) error { } defer out.Close() + if ok, err := isProcHandle(out); err != nil { + return err + } else if !ok { + return fmt.Errorf("%s not on procfs", fpath) + } + if val != "" { _, err = out.Write([]byte(val)) } else { @@ -392,6 +412,14 @@ func SetExecLabel(label string) error { return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), 
label) } +/* +SetTaskLabel sets the SELinux label for the current thread, or an error. +This requires the dyntransition permission. +*/ +func SetTaskLabel(label string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()), label) +} + // SetSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created func SetSocketLabel(label string) error { @@ -403,6 +431,11 @@ func SocketLabel() (string, error) { return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid())) } +// PeerLabel retrieves the label of the client on the other side of a socket +func PeerLabel(fd uintptr) (string, error) { + return unix.GetsockoptString(int(fd), syscall.SOL_SOCKET, syscall.SO_PEERSEC) +} + // SetKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func SetKeyLabel(label string) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 79b005d194c..0c2e1cd38e7 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -96,6 +96,14 @@ func SetExecLabel(label string) error { return nil } +/* +SetTaskLabel sets the SELinux label for the current thread, or an error. +This requires the dyntransition permission. +*/ +func SetTaskLabel(label string) error { + return nil +} + /* SetSocketLabel sets the SELinux label that the kernel will use for any programs that are executed by the current process thread, or an error. @@ -109,6 +117,11 @@ func SocketLabel() (string, error) { return "", nil } +// PeerLabel retrieves the label of the client on the other side of a socket +func PeerLabel(fd uintptr) (string, error) { + return "", nil +} + // SetKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func SetKeyLabel(label string) error { diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f136..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. 
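The libcontainer/utils and go-selinux hunks above both guard procfs access the same way: fstatfs the already-open descriptor and compare the filesystem magic against PROC_SUPER_MAGIC before trusting the file, so helpers like writeCon and CloseExecFrom cannot be redirected onto a non-proc filesystem. A minimal standalone sketch of that check, outside the vendored packages, might look like the following; ensureProcfs and the /proc/self/attr/current usage are illustrative assumptions, not code from this patch.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// ensureProcfs mirrors the EnsureProcHandle/isProcHandle pattern from the
// hunks above: it fails unless the open file actually lives on procfs.
func ensureProcfs(fh *os.File) error {
	var buf unix.Statfs_t
	if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil {
		return fmt.Errorf("fstatfs %s: %v", fh.Name(), err)
	}
	if buf.Type != unix.PROC_SUPER_MAGIC {
		return fmt.Errorf("%s is not on procfs", fh.Name())
	}
	return nil
}

func main() {
	// Hypothetical usage: confirm the target really is procfs before reading
	// or writing it, the same precaution the patched writeCon takes for
	// SELinux attribute files under /proc/self.
	f, err := os.Open("/proc/self/attr/current")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println("on procfs:", ensureProcfs(f) == nil)
}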
diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTORS b/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04eda9..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/README.md b/cluster-autoscaler/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index 810ad40dc93..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). - -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/dce.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/dce.go deleted file mode 100644 index 50a0f2d0992..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. 
-func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/doc.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/doc.go deleted file mode 100644 index 727d7616745..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. -package uuid diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/go.mod b/cluster-autoscaler/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de0d5..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/go.sum b/cluster-autoscaler/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9c3f..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/hash.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1ef3a..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. 
-// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/marshal.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/marshal.go deleted file mode 100644 index 35b89352ad7..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/marshal.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "errors" - "fmt" - - guuid "github.com/google/uuid" -) - -// MarshalText implements encoding.TextMarshaler. -func (u UUID) MarshalText() ([]byte, error) { - if len(u) != 16 { - return nil, nil - } - var js [36]byte - encodeHex(js[:], u) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *UUID) UnmarshalText(data []byte) error { - if len(data) == 0 { - return nil - } - id := Parse(string(data)) - if id == nil { - return errors.New("invalid UUID") - } - *u = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u UUID) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return nil - } - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - var id [16]byte - copy(id[:], data) - *u = id[:] - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (u Array) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], u[:]) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err - } - *u = Array(id) - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u Array) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *Array) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(u[:], data) - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/node.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/node.go deleted file mode 100644 index e524e0101b4..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - return guuid.NodeInterface() -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. 
-// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - return guuid.NodeID() -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/sql.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index 929c3847e2a..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/time.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/time.go deleted file mode 100644 index 5c0960d8723..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - - guuid "github.com/google/uuid" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time = guuid.Time - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { return guuid.ClockSequence() } - -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. -func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/util.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index 255b5e24859..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// xvalues returns the value of a byte as a hexadecimal digit or 255. 
-var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/uuid.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 33700042072..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "io" - - guuid "github.com/google/uuid" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (uuid Array) String() string { - return guuid.UUID(uuid).String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version = guuid.Version - -// A Variant represents a UUIDs variant. -type Variant = guuid.Variant - -// Constants returned by Variant. -const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. - Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. 
-func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] - } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil - } - return nil, err -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. -// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - guuid.SetRand(r) -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/version1.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 7af948da793..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. 
-func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/pborman/uuid/version4.go b/cluster-autoscaler/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index b459d46d13d..00000000000 --- a/cluster-autoscaler/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import guuid "github.com/google/uuid" - -// Random returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/pkg/errors/.travis.yml b/cluster-autoscaler/vendor/github.com/pkg/errors/.travis.yml index 588ceca183f..d4b92663bac 100644 --- a/cluster-autoscaler/vendor/github.com/pkg/errors/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/pkg/errors/.travis.yml @@ -1,10 +1,14 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.3 - - 1.5.4 - - 1.6.2 - - 1.7.1 + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x - tip script: diff --git a/cluster-autoscaler/vendor/github.com/pkg/errors/README.md b/cluster-autoscaler/vendor/github.com/pkg/errors/README.md index 273db3c98ae..6483ba2afb5 100644 --- a/cluster-autoscaler/vendor/github.com/pkg/errors/README.md +++ b/cluster-autoscaler/vendor/github.com/pkg/errors/README.md @@ -1,4 +1,4 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) Package errors provides simple error handling primitives. @@ -47,6 +47,6 @@ We welcome pull requests, bug fixes and issue reports. With that said, the bar f Before proposing a change, please discuss your change by raising an issue. 
-## Licence +## License BSD-2-Clause diff --git a/cluster-autoscaler/vendor/github.com/pkg/errors/errors.go b/cluster-autoscaler/vendor/github.com/pkg/errors/errors.go index 842ee80456d..7421f326ffe 100644 --- a/cluster-autoscaler/vendor/github.com/pkg/errors/errors.go +++ b/cluster-autoscaler/vendor/github.com/pkg/errors/errors.go @@ -6,7 +6,7 @@ // return err // } // -// which applied recursively up the call stack results in error reports +// which when applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. @@ -15,16 +15,17 @@ // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, -// and the supplied message. For example +// together with the supplied message. For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // -// If additional control is required the errors.WithStack and errors.WithMessage -// functions destructure errors.Wrap into its component operations of annotating -// an error with a stack trace and an a message, respectively. +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. // // Retrieving the cause of an error // @@ -38,7 +39,7 @@ // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error which does not implement causer, which is assumed to be +// the topmost error that does not implement causer, which is assumed to be // the original cause. For example: // // switch err := errors.Cause(err).(type) { @@ -48,16 +49,16 @@ // // unknown error // } // -// causer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported +// be formatted by the fmt package. The following verbs are supported: // // %s print the error. If the error has a Cause it will be -// printed recursively +// printed recursively. // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. @@ -65,13 +66,13 @@ // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface. +// invoked. This information can be retrieved with the following interface: // // type stackTracer interface { // StackTrace() errors.StackTrace // } // -// Where errors.StackTrace is defined as +// The returned errors.StackTrace type is defined as // // type StackTrace []Frame // @@ -85,8 +86,8 @@ // } // } // -// stackTracer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. // // See the documentation for Frame.Format for more details. 
package errors @@ -192,7 +193,7 @@ func Wrap(err error, message string) error { } // Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is call, and the format specifier. +// at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { @@ -220,6 +221,18 @@ func WithMessage(err error, message string) error { } } +// WithMessagef annotates err with the format specifier. +// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + type withMessage struct { cause error msg string diff --git a/cluster-autoscaler/vendor/github.com/pkg/errors/stack.go b/cluster-autoscaler/vendor/github.com/pkg/errors/stack.go index 6b1f2891a5a..2874a048cf3 100644 --- a/cluster-autoscaler/vendor/github.com/pkg/errors/stack.go +++ b/cluster-autoscaler/vendor/github.com/pkg/errors/stack.go @@ -46,7 +46,8 @@ func (f Frame) line() int { // // Format accepts flags that alter the printing of some verbs, as follows: // -// %+s path of source file relative to the compile time GOPATH +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) // %+v equivalent to %+s:%d func (f Frame) Format(s fmt.State, verb rune) { switch verb { @@ -79,6 +80,14 @@ func (f Frame) Format(s fmt.State, verb rune) { // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -136,43 +145,3 @@ func funcname(name string) string { i = strings.Index(name, ".") return name[i+1:] } - -func trimGOPATH(name, file string) string { - // Here we want to get the source file path relative to the compile time - // GOPATH. As of Go 1.6.x there is no direct way to know the compiled - // GOPATH at runtime, but we can infer the number of path segments in the - // GOPATH. We note that fn.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired output. We count separators from the end of the file - // path until it finds two more than in the function name and then move - // one character forward to preserve the initial path segment without a - // leading separator. 
- const sep = "/" - goal := strings.Count(name, sep) + 2 - i := len(file) - for n := 0; n < goal; n++ { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - // not enough separators found, set i so that the slice expression - // below leaves file unmodified - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - file = file[i+len(sep):] - return file -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 00000000000..288f0e85488 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_stub.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go similarity index 57% rename from cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_stub.go rename to cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go index 477589e1227..6609e2877c0 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_stub.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -1,10 +1,9 @@ -// Copyright 2016 CoreOS, Inc. -// +// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,12 +11,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !cgo - -package util - -func getRunningSlice() (string, error) { return "", ErrNoCGO } +// +build !go1.12 -func runningFromSystemService() (bool, error) { return false, ErrNoCGO } +package prometheus -func currentUnitName() (string, error) { return "", ErrNoCGO } +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
+func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/collector.go index c0d70b2faf1..1e839650d4d 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -79,7 +79,7 @@ type Collector interface { // of the Describe method. If a Collector sometimes collects no metrics at all // (for example vectors like CounterVec, GaugeVec, etc., which only collect // metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register +// it might even get registered as an unchecked Collector (cf. the Register // method of the Registerer interface). Hence, only use this shortcut // implementation of Describe if you are certain to fulfill the contract. // diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 5d9525defc8..01977de6614 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -122,13 +122,13 @@ // the Collect method. The Describe method has to return separate Desc // instances, representative of the “throw-away” metrics to be created later. // NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still // be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact +// sometimes necessary. The typical use case is a situation where the exact // metrics to be returned by a Collector cannot be predicted at registration // time, but the implementer has sufficient knowledge of the whole system to // guarantee metric consistency. @@ -183,7 +183,6 @@ // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) 
// // Pushing to the Pushgateway // diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ba3b9333edd..dc9247fed97 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -14,9 +14,9 @@ package prometheus import ( - "fmt" "runtime" "runtime/debug" + "sync" "time" ) @@ -26,16 +26,41 @@ type goCollector struct { gcDesc *Desc goInfoDesc *Desc - // metrics to describe and collect - metrics memStatsMetrics + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector which exports metrics about the current Go +// NewGoCollector returns a collector that exports metrics about the current Go // process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This causes a stop-the-world, which is very short with Go1.9+ -// (~25µs). However, with older Go versions, the stop-the-world duration depends -// on the heap size and can be quite significant (~1.7 ms/GiB as per +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per // https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) 
func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( @@ -54,7 +79,11 @@ func NewGoCollector() Collector { "go_info", "Information about the Go environment.", nil, Labels{"version": runtime.Version()}), - metrics: memStatsMetrics{ + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -253,7 +282,7 @@ func NewGoCollector() Collector { } func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) + return "go_memstats_" + s } // Describe returns all descriptions of the collector. @@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) { ch <- c.threadsDesc ch <- c.gcDesc ch <- c.goInfoDesc - for _, i := range c.metrics { + for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) n, _ := runtime.ThreadCreateProfile(nil) ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) @@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -299,3 +364,33 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". 
If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f88da707bc8..d7ea67bd2ba 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) @@ -224,18 +224,21 @@ type histogramCounts struct { } type histogram struct { - // countAndHotIdx is a complicated one. For lock-free yet atomic - // observations, we need to save the total count of observations again, - // combined with the index of the currently-hot counts struct, so that - // we can perform the operation on both values atomically. The least - // significant bit defines the hot counts struct. The remaining 63 bits - // represent the total count of observations. This happens under the - // assumption that the 63bit count will never overflow. Rationale: An - // observations takes about 30ns. Let's assume it could happen in - // 10ns. Overflowing the counter will then take at least (2^63)*10ns, - // which is about 3000 years. + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. // - // This has to be first in the struct for 64bit alignment. See + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: // http://golang.org/pkg/sync/atomic/#pkg-note-BUG countAndHotIdx uint64 @@ -243,16 +246,14 @@ type histogram struct { desc *Desc writeMtx sync.Mutex // Only used in the Write method. 
- upperBounds []float64 - // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of // pointers to guarantee 64bit alignment of the histogramCounts, see // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - hotIdx int // Index of currently-hot counts. Only used within Write. - labelPairs []*dto.LabelPair + upperBounds []float64 + labelPairs []*dto.LabelPair } func (h *histogram) Desc() *Desc { @@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) { // 300 buckets: 154 ns/op linear - binary 61.6 ns/op i := sort.SearchFloat64s(h.upperBounds, v) - // We increment h.countAndHotIdx by 2 so that the counter in the upper - // 63 bits gets incremented by 1. At the same time, we get the new value + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 2) - hotCounts := h.counts[n%2] + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] if i < len(h.upperBounds) { atomic.AddUint64(&hotCounts.buckets[i], 1) @@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) { } func (h *histogram) Write(out *dto.Metric) error { - var ( - his = &dto.Histogram{} - buckets = make([]*dto.Bucket, len(h.upperBounds)) - hotCounts, coldCounts *histogramCounts - count uint64 - ) - - // For simplicity, we mutex the rest of this method. It is not in the - // hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it. + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. h.writeMtx.Lock() defer h.writeMtx.Unlock() - // This is a bit arcane, which is why the following spells out this if - // clause in English: - // - // If the currently-hot counts struct is #0, we atomically increment - // h.countAndHotIdx by 1 so that from now on Observe will use the counts - // struct #1. Furthermore, the atomic increment gives us the new value, - // which, in its most significant 63 bits, tells us the count of - // observations done so far up to and including currently ongoing - // observations still using the counts struct just changed from hot to - // cold. To have a normal uint64 for the count, we bitshift by 1 and - // save the result in count. We also set h.hotIdx to 1 for the next - // Write call, and we will refer to counts #1 as hotCounts and to counts - // #0 as coldCounts. - // - // If the currently-hot counts struct is #1, we do the corresponding - // things the other way round. We have to _decrement_ h.countAndHotIdx - // (which is a bit arcane in itself, as we have to express -1 with an - // unsigned int...). - if h.hotIdx == 0 { - count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 - h.hotIdx = 1 - hotCounts = h.counts[1] - coldCounts = h.counts[0] - } else { - count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. - h.hotIdx = 0 - hotCounts = h.counts[0] - coldCounts = h.counts[1] - } - - // Now we have to wait for the now-declared-cold counts to actually cool - // down, i.e. wait for all observations still using it to finish. 
That's - // the case once the count in the cold counts struct is the same as the - // one atomically retrieved from the upper 63bits of h.countAndHotIdx. - for { - if count == atomic.LoadUint64(&coldCounts.count) { - break - } + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { runtime.Gosched() // Let observations get work done. } - his.SampleCount = proto.Uint64(count) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - buckets[i] = &dto.Bucket{ + his.Bucket[i] = &dto.Bucket{ CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } } - his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/http.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 9f0875bfc81..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "compress/gzip" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead. 
-func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) -// instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - httpError(rsp, err) - return - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - httpError(rsp, err) - return - } - } - }) -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: (1) It uses Summaries -// rather than Histograms. Summaries are not useful if aggregation across -// multiple instances is required. (2) It uses microseconds as unit, which is -// deprecated and should be replaced by seconds. (3) The size of the request is -// calculated in a separate goroutine. Since this calculator requires access to -// the request header, it creates a race with any writes to the header performed -// during request handling. httputil.ReverseProxy is a prominent example for a -// handler performing such writes. (4) It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. 
-func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." 
- reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current goroutine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() - - return out -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 55176d58ce6..9b809794212 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,8 +16,6 @@ package prometheus import ( "errors" "os" - - "github.com/prometheus/procfs" ) type processCollector struct { @@ -59,20 +57,9 @@ type ProcessCollectorOpts struct { // collector for the current process with an empty namespace string and no error // reporting. // -// Currently, the collector depends on a Linux-style proc filesystem and -// therefore only exports metrics for Linux. -// -// Note: An older version of this function had the following signature: -// -// NewProcessCollector(pid int, namespace string) Collector -// -// Most commonly, it was called as -// -// NewProcessCollector(os.Getpid(), "") -// -// The following call of the current version is equivalent to the above: -// -// NewProcessCollector(ProcessCollectorOpts{}) +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. func NewProcessCollector(opts ProcessCollectorOpts) Collector { ns := "" if len(opts.Namespace) > 0 { @@ -126,7 +113,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } // Set up process metric collection if supported by the runtime. 
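With the platform-specific collectors introduced below, NewProcessCollector(ProcessCollectorOpts{}) behaves the same everywhere: it exports process metrics on Linux-style proc filesystems and on Windows, and collects nothing elsewhere. A minimal sketch wiring it to its own registry; the registry, port, and endpoint are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Collect metrics for the current process. On unsupported platforms
	// the collector simply reports no metrics.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}
```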
- if _, err := procfs.NewStat(); err == nil { + if canCollectProcess() { c.collectFn = c.processCollect } else { c.collectFn = func(ch chan<- Metric) { @@ -153,46 +140,6 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} - func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 00000000000..3117461cde7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 00000000000..e0b935d1fef --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. 
+} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 67b56d37cfd..fa535684f96 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -38,7 +38,6 @@ type delegator interface { type responseWriterDelegator struct { http.ResponseWriter - handler, method string status int written int64 wroteHeader bool @@ -75,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator } type flusherDelegator struct{ *responseWriterDelegator } type hijackerDelegator struct{ *responseWriterDelegator } type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { @@ -93,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) @@ -196,4 +201,157 @@ func init() { http.CloseNotifier }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + 
http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. 
+ if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 100644 index 31a70695695..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -type pusherDelegator struct{ *responseWriterDelegator } - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func init() { - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - 
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go deleted file mode 100644 index 8bb9b8b68f8..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - - return pickDelegator[id](d) -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 668eb6b3c93..cea5a90fd9d 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -47,7 +47,6 @@ import ( const ( contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) @@ -85,10 +84,32 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var inFlightSem chan struct{} + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + if opts.MaxRequestsInFlight > 0 { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { @@ -107,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) } + errCnt.WithLabelValues("gathering").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -147,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error encoding and sending metric family:", err) } + errCnt.WithLabelValues("encoding").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -237,9 +260,12 @@ const ( // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the // last error message in the body. 
Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. ContinueOnError // Panic upon the first error encountered (useful for "crash only" apps). PanicOnError @@ -262,6 +288,18 @@ type HandlerOpts struct { // logged regardless of the configured ErrorHandling provided ErrorLog // is not nil. ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 86fd564470f..83c49b66a81 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -14,7 +14,9 @@ package promhttp import ( + "crypto/tls" "net/http" + "net/http/httptrace" "time" "github.com/prometheus/client_golang/prometheus" @@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT return resp, err }) } + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. 
Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go deleted file mode 100644 index a034d1ec0f1..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" -) - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
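The trace hooks described in the comment above are usually fed by per-hook observers. A sketch under that assumption; the metric names, buckets, and target URL are purely illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "dns_duration_seconds", // illustrative name
		Help:    "DNS resolution time in seconds since the start of the request.",
		Buckets: prometheus.DefBuckets,
	})
	tlsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "tls_handshake_duration_seconds", // illustrative name
		Help:    "TLS handshake time in seconds since the start of the request.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(dnsLatency, tlsLatency)

	// Hooks left nil in InstrumentTrace are simply ignored.
	trace := &promhttp.InstrumentTrace{
		DNSDone:          dnsLatency.Observe,
		TLSHandshakeDone: tlsLatency.Observe,
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```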
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/registry.go index b5e70b93fa1..6c32516aa2e 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -325,9 +325,17 @@ func (r *Registry) Register(c Collector) error { return nil } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } } } // If the collectorID is new, but at least one of the descs existed @@ -680,7 +688,7 @@ func processMetric( // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. 
Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 2980614dff4..c970fdee0e4 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,8 +16,10 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" @@ -37,7 +39,7 @@ const quantileLabel = "quantile" // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estimations at all by +// upcoming v1.0.0 of the library. There will be no rank estimations at all by // default. For a sane transition, it is recommended to set the desired rank // estimations explicitly. // @@ -56,16 +58,8 @@ type Summary interface { Observe(float64) } -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. @@ -84,7 +78,7 @@ const ( // mandatory to set Name to a non-empty string. While all other fields are // optional and can safely be left at their zero value, it is recommended to set // a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v0.10 of the library. +// as the default value will change in the upcoming v1.0.0 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -121,13 +115,8 @@ type SummaryOpts struct { // Objectives defines the quantile rank estimates with their respective // absolute error. If Objectives[q] = e, then the value reported for q // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. + // default value is an empty map, resulting in a summary without + // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -151,7 +140,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... 
The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding @@ -196,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } if opts.Objectives == nil { - opts.Objectives = DefObjectives + opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { @@ -214,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, @@ -382,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. 
+ atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 49159bf3eb0..e303eef6d33 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -32,6 +32,12 @@ import ( // WrapRegistererWith provides a way to add fixed labels to a subset of // Collectors. It should not be used to add fixed labels to all metrics exposed. // +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// // The Collector example demonstrates a use of WrapRegistererWith. func WrapRegistererWith(labels Labels, reg Registerer) Registerer { return &wrappingRegisterer{ @@ -54,6 +60,12 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // (see NewGoCollector) and the process collector (see NewProcessCollector). (In // fact, those metrics are already prefixed with “go_” or “process_”, // respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
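As an illustration of the wrapping semantics documented above, the following sketch registers a counter through a label- and prefix-wrapping Registerer; the label, prefix, and metric name are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Every collector registered through this wrapper gets the fixed
	// label subsystem="cache" and a "myapp_" name prefix.
	wrapped := prometheus.WrapRegistererWithPrefix("myapp_",
		prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "cache"}, reg))

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Example counter; exposed as myapp_hits_total with subsystem=\"cache\".",
	})
	wrapped.MustRegister(hits)
	hits.Add(3)

	// Gather from the underlying registry to see the rewritten names.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```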
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { return &wrappingRegisterer{ wrappedRegisterer: reg, @@ -123,6 +135,15 @@ func (c *wrappingCollector) Describe(ch chan<- *Desc) { } } +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + type wrappingMetric struct { wrappedMetric Metric prefix string diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb654..26e92288c7c 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/metric.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/metric.go index f7250909b9f..00804b7fedb 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/common/model/metric.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/metric.go @@ -21,7 +21,6 @@ import ( ) var ( - separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go index 46259b1f109..7b0064fdb23 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go @@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - *t = Time(v + va) + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } default: return fmt.Errorf("invalid time %q", string(b)) diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/.golangci.yml b/cluster-autoscaler/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 00000000000..438ca92eca5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,6 @@ +# Run only staticcheck for now. Additional linters will be enabled one-by-one. 
+linters: + enable: + - staticcheck + - govet + disable-all: true diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/cluster-autoscaler/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 35993c41c27..56ba67d3e31 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1 +1,2 @@ -* Tobias Schmidt +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile index 947d7d8fa71..616a0d25eb0 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile @@ -14,17 +14,16 @@ include Makefile.common %/.unpacked: %.ttar + @echo ">> extracting fixtures" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -update_fixtures: fixtures.ttar sysfs/fixtures.ttar - -%fixtures.ttar: %/fixtures - rm -v $(dir $*)fixtures/.unpacked - ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ +update_fixtures: + rm -vf fixtures/.unpacked + ./ttar -c -f fixtures.ttar fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test +test: fixtures/.unpacked common-test diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common index 741579e60f2..c7f9ea64ff5 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common @@ -29,12 +29,15 @@ GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -unexport GOVENDOR +GOVENDOR := +GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). @@ -55,32 +58,57 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$( # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif - - unexport GO111MODULE endif PROMU := $(FIRST_GOPATH)/bin/promu -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = ./... -GO_VERSION ?= $(shell $(GO) version) -GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif -PROMU_VERSION ?= 0.2.0 +PROMU_VERSION ?= 0.4.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.16.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. 
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom -.PHONY: all -all: precheck style staticcheck unused build test +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; +.PHONY: common-all +common-all: precheck style check_license lint unused build test + .PHONY: common-style common-style: @echo ">> checking code style" @@ -102,6 +130,15 @@ common-check_license: exit 1; \ fi +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... +endif + .PHONY: common-test-short common-test-short: @echo ">> running short tests" @@ -110,26 +147,35 @@ common-test-short: .PHONY: common-test common-test: @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) -.PHONY: common-staticcheck -common-staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else - $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + $(GOLANGCI_LINT) run $(pkgs) endif +endif + +# For backward-compatibility. 
+.PHONY: common-staticcheck +common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) @@ -140,8 +186,9 @@ else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod -ifneq (,$(wildcard vendor)) +else @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ @@ -159,45 +206,48 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) -.PHONY: common-docker -common-docker: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . - -.PHONY: common-docker-publish -common-docker-publish: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker-tag-latest -common-docker-tag-latest: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + . + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): - curl -s -L $(PROMU_URL) | tar -xvz -C /tmp - mkdir -v -p $(FIRST_GOPATH)/bin - cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh -.PHONY: $(STATICCHECK) -$(STATICCHECK): -ifdef GO111MODULE -# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. -# See https://github.com/golang/go/issues/27643. -# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. 
- tmpModule=$$(mktemp -d 2>&1) && \ - mkdir -p $${tmpModule}/staticcheck && \ - cd "$${tmpModule}"/staticcheck && \ - GO111MODULE=on $(GO) mod init example.com/staticcheck && \ - GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ - rm -rf $${tmpModule}; -else - GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR @@ -212,7 +262,6 @@ precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck - PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/README.md b/cluster-autoscaler/vendor/github.com/prometheus/procfs/README.md index 2095494719b..6f8850feb67 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/README.md +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/README.md @@ -1,7 +1,7 @@ # procfs This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. +metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. @@ -9,3 +9,45 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Building and Testing + +The procfs library is normally built as part of another application. However, when making +changes to the library, the `make test` command can be used to run the API test suite. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. 
When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/buddyinfo.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/buddyinfo.go index d3a8268078c..63d4229a45a 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,19 +31,9 @@ type BuddyInfo struct { Sizes []float64 } -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) +func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/fixtures.ttar b/cluster-autoscaler/vendor/github.com/prometheus/procfs/fixtures.ttar index 13c831ef599..951d909af53 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -1,45 +1,48 @@ # Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231 +Directory: fixtures/proc/26231 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cmdline +Path: fixtures/proc/26231/cmdline Lines: 1 vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/comm +Path: fixtures/proc/26231/comm Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cwd +Path: fixtures/proc/26231/cwd SymlinkTo: /usr/bin # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/exe +Path: fixtures/proc/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/fd +Directory: fixtures/proc/26231/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/0 +Path: fixtures/proc/26231/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/1 +Path: fixtures/proc/26231/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/10 +Path: fixtures/proc/26231/fd/10 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/2 +Path: fixtures/proc/26231/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/3 +Path: fixtures/proc/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/io +Path: fixtures/proc/26231/io Lines: 7 rchar: 750339 wchar: 818609 @@ -50,7 +53,7 @@ write_bytes: 2048 cancelled_write_bytes: -1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/limits +Path: fixtures/proc/26231/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds @@ -71,14 +74,14 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/mountstats -Lines: 19 +Path: fixtures/proc/26231/mountstats +Lines: 20 device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured @@ -91,13 +94,14 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers= NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/net +Directory: fixtures/proc/26231/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/net/dev +Path: fixtures/proc/26231/net/dev Lines: 4 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -105,142 +109,226 @@ Inter-| Receive | Transmit eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/ns +Directory: fixtures/proc/26231/ns Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/mnt +Path: fixtures/proc/26231/ns/mnt SymlinkTo: mnt:[4026531840] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/net +Path: fixtures/proc/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/root +Path: fixtures/proc/26231/root SymlinkTo: / # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/stat +Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 Mode: 644 # 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232 +Path: fixtures/proc/26231/status +Lines: 53 + +Name: prometheus +Umask: 0022 +State: S (sleeping) +Tgid: 1 +Ngid: 0 +Pid: 1 +PPid: 0 +TracerPid: 0 +Uid: 0 0 0 0 +Gid: 0 0 0 0 +FDSize: 128 +Groups: +NStgid: 1 +NSpid: 1 +NSpgid: 1 +NSsid: 1 +VmPeak: 58472 kB +VmSize: 58440 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 8028 kB +VmRSS: 6716 kB +RssAnon: 2092 kB +RssFile: 4624 kB +RssShmem: 0 kB +VmData: 2580 kB +VmStk: 136 kB +VmExe: 948 kB +VmLib: 6816 kB +VmPTE: 128 kB +VmPMD: 12 kB +VmSwap: 660 kB +HugetlbPages: 0 kB +Threads: 1 +SigQ: 8/63965 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 7be3c0fe28014a03 +SigIgn: 0000000000001000 +SigCgt: 00000001800004ec +CapInh: 0000000000000000 +CapPrm: 0000003fffffffff +CapEff: 0000003fffffffff +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 +Cpus_allowed: ff +Cpus_allowed_list: 0-7 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 4742839 +nonvoluntary_ctxt_switches: 1727500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cmdline +Path: fixtures/proc/26232/cmdline Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/comm +Path: fixtures/proc/26232/comm Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cwd +Path: fixtures/proc/26232/cwd SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232/fd +Directory: fixtures/proc/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/0 +Path: fixtures/proc/26232/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/1 +Path: fixtures/proc/26232/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/2 +Path: fixtures/proc/26232/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/3 +Path: fixtures/proc/26232/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/4 +Path: fixtures/proc/26232/fd/4 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/limits +Path: fixtures/proc/26232/limits Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/root +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/root SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/stat +Path: fixtures/proc/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26233 +Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26233/cmdline +Path: fixtures/proc/26233/cmdline Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/584 +Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/584/stat +Path: fixtures/proc/584/stat Lines: 2 1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 #!/bin/cat /proc/self/stat Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/short -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/short/buddyinfo -Lines: 3 -Node 0, zone -Node 0, zone -Node 0, zone -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/sizemismatch -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Path: fixtures/proc/buddyinfo Lines: 3 
-Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/valid -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/valid/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 +Path: fixtures/proc/diskstats +Lines: 49 + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 + 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 + 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 + 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 + 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 + 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 + 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 + 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 + 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 + 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 + 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 + 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 + 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 + 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 + 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 + 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 + 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 + 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 + 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 + 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 + 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 + 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 +Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs +Directory: fixtures/proc/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs/xfs +Directory: fixtures/proc/fs/xfs Mode: 755 # 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/fs/xfs/stat +Path: fixtures/proc/fs/xfs/stat Lines: 23 extent_alloc 92447 97589 92448 93751 abt 0 0 0 0 @@ -267,18 +355,18 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/mdstat +Path: fixtures/proc/mdstat Lines: 26 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - + md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] - + md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] - + md4 : inactive raid1 sda3[0] sdb3[1] 4883648 blocks [2/2] [UU] @@ -297,10 +385,10 @@ md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net +Directory: fixtures/proc/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/dev +Path: fixtures/proc/net/dev Lines: 6 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -310,7 +398,7 @@ docker0: 2568 38 0 0 0 0 0 0 438 eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs +Path: fixtures/proc/net/ip_vs Lines: 21 IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags @@ -335,7 +423,7 @@ FWM 10001000 wlc -> C0A83215:0CEA Route 0 0 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs_stats +Path: fixtures/proc/net/ip_vs_stats Lines: 6 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes @@ -345,10 +433,10 @@ Lines: 6 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net/rpc +Directory: fixtures/proc/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfs +Path: fixtures/proc/net/rpc/nfs Lines: 5 net 18628 0 18628 6 rpc 4329785 0 4338291 @@ -357,7 +445,7 @@ proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfsd +Path: fixtures/proc/net/rpc/nfsd Lines: 11 rc 0 6 18622 fh 0 0 0 0 0 @@ -372,7 +460,27 @@ proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/xfrm_stat +Path: fixtures/proc/net/unix +Lines: 6 +Num RefCount Protocol Flags Type St Inode Path +0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log 
+0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 5091797 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix_without_inode +Lines: 6 +Num RefCount Protocol Flags Type St Path +0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/xfrm_stat Lines: 28 XfrmInError 1 XfrmInBufferError 2 @@ -404,10 +512,30 @@ XfrmOutStateInvalid 28765 XfrmAcquireError 24532 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/self +Directory: fixtures/proc/pressure +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/cpu +Lines: 1 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/io +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/memory +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/stat +Path: fixtures/proc/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 @@ -427,36 +555,1254 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/symlinktargets +Directory: fixtures/proc/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/README +Path: fixtures/proc/symlinktargets/README Lines: 2 This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
They are otherwise ignored by the tests Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/abc +Path: fixtures/proc/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/def Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/def +Path: fixtures/proc/symlinktargets/ghi Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/ghi +Path: fixtures/proc/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dev_id +Lines: 1 +0x20 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/uvw +Path: fixtures/sys/class/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/xyz +Path: fixtures/sys/class/net/eth0/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/.unpacked +Path: fixtures/sys/class/net/eth0/phys_switch_id Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply/BAT0 +Mode: 755 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/alarm +Lines: 1 +2503000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/capacity +Lines: 1 +98 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_full +Lines: 1 +50060000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=12229000 +POWER_SUPPLY_POWER_NOW=4830000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=50060000 +POWER_SUPPLY_ENERGY_NOW=49450000 +POWER_SUPPLY_CAPACITY=98 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/temp +Lines: 1 +49925 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/type +Lines: 1 +bcm2835_thermal +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/mode +Lines: 1 +enabled +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/passive +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/temp +Lines: 1 +44000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/type +Lines: 1 +acpitz Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 
20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sda1/stats/stats +Lines: 1 +extent_alloc 1 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sdb1/stats/stats +Lines: 1 +extent_alloc 2 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/fs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/fs.go index b6c6b2ce1f0..0102ab0fd85 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/fs.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/fs.go @@ -14,69 +14,30 @@ package procfs import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/nfs" - "github.com/prometheus/procfs/xfs" + "github.com/prometheus/procfs/internal/fs" ) -// FS represents the pseudo-filesystem proc, which provides an interface to +// FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) +type FS struct { + proc fs.FS } -// XFSStats retrieves XFS filesystem runtime statistics. -func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} - -// NFSClientRPCStats retrieves NFS client RPC statistics. -func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint - return nfs.ParseClientRPCStats(f) +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) } -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
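A minimal usage sketch (illustrative only, not part of the vendored files): with this fs.go rewrite, procfs.FS becomes a struct wrapping internal/fs.FS and is constructed through NewFS or NewDefaultFS rather than being a bare string path. The import path is the vendored package shown in the diff headers; the MDStat call refers to the renamed accessor that appears later in this patch.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS is shorthand for NewFS(procfs.DefaultMountPoint), i.e. "/proc".
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatalf("opening /proc: %v", err)
	}

	// MDStat (formerly ParseMDStat) parses /proc/mdstat into a slice of MDStat structs.
	md, err := fs.MDStat()
	if err != nil {
		log.Fatalf("parsing mdstat: %v", err)
	}
	fmt.Printf("found %d md devices\n", len(md))
}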
-func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) if err != nil { - return nil, err + return FS{}, err } - defer f.Close() - - return nfs.ParseServerRPCStats(f) + return FS{fs}, nil } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.mod b/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.mod index e89ee6c90f6..8a1b839fd23 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.mod +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.mod @@ -1 +1,3 @@ module github.com/prometheus/procfs + +require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.sum b/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.sum new file mode 100644 index 00000000000..7827dd3d56c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 00000000000..c66a1cf80e4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) 
+} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/parse.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 2ff228e9d1f..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go deleted file mode 100644 index df0d567b780..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. 
- b := make([]byte, 128) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/ipvs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/ipvs.go index e36d4a3bd08..2d6cb8d1c6c 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/ipvs.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/ipvs.go @@ -62,19 +62,9 @@ type IPVSBackendStatus struct { Weight uint64 } -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } @@ -131,19 +121,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mdstat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mdstat.go index 9dc19583d8d..71c10678250 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mdstat.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mdstat.go @@ -42,64 +42,64 @@ type MDStat struct { BlocksSynced int64 } -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
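A hedged migration sketch for the ipvs.go hunk above (not part of the vendored code; assumes the same procfs import as the earlier sketch): the package-level NewIPVSStats and NewIPVSBackendStatus constructors are removed, so callers build an FS first and use the renamed methods.

func readIPVS(fs procfs.FS) (procfs.IPVSStats, []procfs.IPVSBackendStatus, error) {
	// Formerly procfs.NewIPVSStats(); now a method that reads net/ip_vs_stats.
	stats, err := fs.IPVSStats()
	if err != nil {
		return procfs.IPVSStats{}, nil, err
	}
	// Formerly procfs.NewIPVSBackendStatus(); now a method that reads net/ip_vs.
	backends, err := fs.IPVSBackendStatus()
	if err != nil {
		return procfs.IPVSStats{}, nil, err
	}
	return stats, backends, nil
}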
+func parseMDStat(mdstatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdstatData), "\n") for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + if strings.TrimSpace(l) == "" || l[0] == ' ' || + strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { continue } - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) + deviceFields := strings.Fields(l) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l) } - mdName := mainLine[0] - activityState := mainLine[2] + mdName := deviceFields[0] + activityState := deviceFields[2] if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) + return mdStats, fmt.Errorf("missing lines for md device %s", mdName) } - active, total, size, err := evalStatusline(lines[i+1]) + active, total, size, err := evalStatusLine(lines[i+1]) if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, err } - // j is the line number of the syncing-line. - j := i + 2 + syncLineIdx := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 + syncLineIdx++ } - // If device is syncing at the moment, get the number of currently + // If device is recovering/syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) + if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, err } } - mdStates = append(mdStates, MDStat{ + mdStats = append(mdStats, MDStat{ Name: mdName, ActivityState: activityState, DisksActive: active, @@ -109,10 +109,10 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { }) } - return mdStates, nil + return mdStats, nil } -func evalStatusline(statusline string) (active, total, size int64, err error) { +func evalStatusLine(statusline string) (active, total, size int64, err error) { matches := statuslineRE.FindStringSubmatch(statusline) if len(matches) != 4 { return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) @@ -136,7 +136,7 @@ func evalStatusline(statusline string) (active, total, size int64, err error) { return active, total, size, nil } -func evalBuildline(buildline string) (syncedBlocks int64, err error) { +func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) { matches := buildlineRE.FindStringSubmatch(buildline) if len(matches) != 2 { return 0, fmt.Errorf("unexpected buildline: %s", buildline) diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go index 7a8a1e09901..35b2ef3513f 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go @@ -69,6 +69,8 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics provided. 
StatVersion string + // The mount options of the NFS mount. + Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. @@ -179,11 +181,11 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration + CumulativeQueueMilliseconds uint64 // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration + CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration + CumulativeTotalRequestMilliseconds uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -202,7 +204,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration + IdleTimeSeconds uint64 // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. @@ -317,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( + fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -338,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } switch ss[0] { + case fieldOpts: + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } case fieldAge: // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -509,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], }) } @@ -593,7 +608,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, + IdleTimeSeconds: ns[4], Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_dev.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_dev.go index 3f2523371ab..a0b7a01196a 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_dev.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_dev.go @@ -47,23 +47,13 @@ type NetDevLine struct { // are 
interface names. type NetDev map[string]NetDevLine -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() -} - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) } -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } @@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) { } defer f.Close() - nd := NetDev{} + netDev := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. @@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) { continue } - line, err := nd.parseLine(s.Text()) + line, err := netDev.parseLine(s.Text()) if err != nil { - return nd, err + return netDev, err } - nd[line.Name] = *line + netDev[line.Name] = *line } - return nd, s.Err() + return netDev, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. -func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") @@ -185,11 +175,11 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { +func (netDev NetDev) Total() NetDevLine { total := NetDevLine{} - names := make([]string, 0, len(nd)) - for _, ifc := range nd { + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_unix.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 00000000000..240340a83ab --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,275 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. 
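A small usage sketch for the new net_unix.go parser added here (illustrative only, not part of the vendored file; assumes the same procfs and fmt imports as the first sketch): NewNetUnix reads /proc/net/unix under the default mount point, and each parsed row exposes the kernel table fields, with Type, Flags, and State providing String() methods.

func dumpUnixSockets() error {
	nu, err := procfs.NewNetUnix() // reads /proc/net/unix via DefaultMountPoint
	if err != nil {
		return err
	}
	for _, row := range nu.Rows {
		// Type and State are numeric fields but print via their String() methods.
		fmt.Printf("%s refs=%d type=%s state=%s path=%q\n",
			row.KernelPtr, row.RefCount, row.Type, row.State, row.Path)
	}
	return nil
}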
+ +const ( + netUnixKernelPtrIdx = iota + netUnixRefCountIdx + _ + netUnixFlagsIdx + netUnixTypeIdx + netUnixStateIdx + netUnixInodeIdx + + // Inode and Path are optional. + netUnixStaticFieldsCnt = 6 +) + +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") + +// NetUnixType is the type of the type field. +type NetUnixType uint64 + +// NetUnixFlags is the type of the flags field. +type NetUnixFlags uint64 + +// NetUnixState is the type of the state field. +type NetUnixState uint64 + +// NetUnixLine represents a line of /proc/net/unix. +type NetUnixLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUnixFlags + Type NetUnixType + State NetUnixState + Inode uint64 + Path string +} + +// NetUnix holds the data read from /proc/net/unix. +type NetUnix struct { + Rows []*NetUnixLine +} + +// NewNetUnix returns data read from /proc/net/unix. +func NewNetUnix() (*NetUnix, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetUnix() +} + +// NewNetUnix returns data read from /proc/net/unix. +func (fs FS) NewNetUnix() (*NetUnix, error) { + return NewNetUnixByPath(fs.proc.Path("net/unix")) +} + +// NewNetUnixByPath returns data read from /proc/net/unix by file path. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByPath(path string) (*NetUnix, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return NewNetUnixByReader(f) +} + +// NewNetUnixByReader returns data read from /proc/net/unix by a reader. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { + nu := &NetUnix{ + Rows: make([]*NetUnixLine, 0, 32), + } + scanner := bufio.NewScanner(reader) + // Omit the header line. + scanner.Scan() + header := scanner.Text() + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. + // This code works for both cases. 
+ hasInode := strings.Contains(header, "Inode") + + minFieldsCnt := netUnixStaticFieldsCnt + if hasInode { + minFieldsCnt++ + } + for scanner.Scan() { + line := scanner.Text() + item, err := nu.parseLine(line, hasInode, minFieldsCnt) + if err != nil { + return nu, err + } + nu.Rows = append(nu.Rows, item) + } + + return nu, scanner.Err() +} + +func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { + fields := strings.Fields(line) + fieldsLen := len(fields) + if fieldsLen < minFieldsCnt { + return nil, fmt.Errorf( + "Parse Unix domain failed: expect at least %d fields but got %d", + minFieldsCnt, fieldsLen) + } + kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) + } + users, err := u.parseUsers(fields[netUnixRefCountIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) + } + flags, err := u.parseFlags(fields[netUnixFlagsIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) + } + typ, err := u.parseType(fields[netUnixTypeIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) + } + state, err := u.parseState(fields[netUnixStateIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) + } + var inode uint64 + if hasInode { + inodeStr := fields[netUnixInodeIdx] + inode, err = u.parseInode(inodeStr) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) + } + } + + nuLine := &NetUnixLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is optional. 
+ if fieldsLen > minFieldsCnt { + pathIdx := netUnixInodeIdx + 1 + if !hasInode { + pathIdx-- + } + nuLine.Path = fields[pathIdx] + } + + return nuLine, nil +} + +func (u NetUnix) parseKernelPtr(str string) (string, error) { + if !strings.HasSuffix(str, ":") { + return "", errInvalidKernelPtrFmt + } + return str[:len(str)-1], nil +} + +func (u NetUnix) parseUsers(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseProtocol(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { + typ, err := strconv.ParseUint(hexStr, 16, 16) + if err != nil { + return 0, err + } + return NetUnixType(typ), nil +} + +func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { + flags, err := strconv.ParseUint(hexStr, 16, 32) + if err != nil { + return 0, err + } + return NetUnixFlags(flags), nil +} + +func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { + st, err := strconv.ParseInt(hexStr, 16, 8) + if err != nil { + return 0, err + } + return NetUnixState(st), nil +} + +func (u NetUnix) parseInode(inodeStr string) (uint64, error) { + return strconv.ParseUint(inodeStr, 10, 64) +} + +func (t NetUnixType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUnixFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUnixState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/nfs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf681952..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. -// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. 
-type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. -type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. -type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. -type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. -type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. -type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? 
- Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. -type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. -type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc5bc5..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. 
- if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5ad9bd..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a35858..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case "proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc.go index 06bed0ef4a3..8a8430147ea 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc.go @@ -20,6 +20,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" ) // Proc provides information about a running process. @@ -27,7 +29,7 @@ type Proc struct { // The process ID. PID int - fs FS + fs fs.FS } // Procs represents a list of Proc structs. @@ -52,7 +54,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -66,28 +68,35 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := os.Readlink(fs.proc.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. 
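// A minimal usage sketch (illustrative only, not taken from the upstream file):
// the "/proc" mount point and os.Getpid are assumptions made for the example,
// and error handling is abbreviated.
//
//     fs, err := procfs.NewFS("/proc")
//     if err != nil {
//         log.Fatal(err)
//     }
//     p, err := fs.Proc(os.Getpid())
//     if err != nil {
//         log.Fatal(err)
//     }
//     fmt.Println("resolved PID:", p.PID)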
+func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs}, nil + return Proc{PID: pid, fs: fs.proc}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := os.Open(fs.proc.Path()) if err != nil { return Procs{}, err } @@ -104,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs}) + p = append(p, Proc{PID: int(pid), fs: fs.proc}) } return p, nil diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_io.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_io.go index 0251c83bfe8..0ff89b1cef1 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_io.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_io.go @@ -39,8 +39,8 @@ type ProcIO struct { CancelledWriteBytes int64 } -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { +// IO creates a new ProcIO instance from a given Proc instance. +func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} f, err := os.Open(p.path("io")) diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_limits.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_limits.go index f04ba6fda85..91ee24df8bd 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_limits.go @@ -78,7 +78,14 @@ var ( ) // NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. +func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_ns.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_ns.go index d06c26ebad9..c66740ff746 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_ns.go @@ -29,9 +29,9 @@ type Namespace struct { // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// Namespaces reads from /proc//ns/* to get the namespaces of which the // process is a member. -func (p Proc) NewNamespaces() (Namespaces, error) { +func (p Proc) Namespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_psi.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 00000000000..46fe2662637 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,101 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
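// A minimal usage sketch (illustrative only, not taken from the upstream file),
// assuming the default "/proc" mount point and the "io" resource:
//
//     fs, err := procfs.NewFS("/proc")
//     if err != nil {
//         log.Fatal(err)
//     }
//     psi, err := fs.PSIStatsForResource("io")
//     if err != nil {
//         log.Fatal(err)
//     }
//     if psi.Some != nil {
//         fmt.Printf("io some: avg10=%.2f total=%dus\n", psi.Some.Avg10, psi.Some.Total)
//     }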
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + defer file.Close() + return parsePSIStats(resource, file) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + stats, err := ioutil.ReadAll(file) + if err != nil { + return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) + } + + for _, l := range strings.Split(string(stats), "\n") { + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go index 3cf2a9f18f0..6ed98a8ae37 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,6 +18,8 @@ import ( "fmt" "io/ioutil" "os" + + "github.com/prometheus/procfs/internal/fs" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -95,15 +97,22 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. - VSize int + VSize uint // Resident set size in pages. RSS int - fs FS + proc fs.FS } // NewStat returns the current status information of the process. +// +// Deprecated: use NewStat() instead func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { f, err := os.Open(p.path("stat")) if err != nil { return ProcStat{}, err @@ -118,7 +127,7 @@ func (p Proc) NewStat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, fs: p.fs} + s = ProcStat{PID: p.PID, proc: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) @@ -164,7 +173,7 @@ func (p Proc) NewStat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { +func (s ProcStat) VirtualMemory() uint { return s.VSize } @@ -175,7 +184,8 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. 
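// For illustration only (not upstream code), the start time of the current
// process could be read as follows; os.Getpid is an assumption for the example
// and error handling is elided:
//
//     p, _ := procfs.NewProc(os.Getpid())
//     stat, _ := p.Stat()
//     started, _ := stat.StartTime() // seconds since the Unix epoch
//     fmt.Println(started)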
func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() + fs := FS{proc: s.proc} + stat, err := fs.Stat() if err != nil { return 0, err } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 00000000000..6b4b61f71cd --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Peak virtual memory size. + VmPeak uint64 + // Virtual memory size. + VmSize uint64 + // Locked memory size. + VmLck uint64 + // Pinned memory size. + VmPin uint64 + // Peak resident set size. + VmHWM uint64 + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 + // Size of resident anonymous memory. + RssAnon uint64 + // Size of resident file mappings. + RssFile uint64 + // Size of resident shared memory. + RssShmem uint64 + // Size of data segments. + VmData uint64 + // Size of stack segments. + VmStk uint64 + // Size of text segments. + VmExe uint64 + // Shared library code size. + VmLib uint64 + // Page table entries size. + VmPTE uint64 + // Size of second-level page tables. + VmPMD uint64 + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 +} + +// NewStatus returns the current status information of the process. 
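// A short usage sketch (illustrative only, not upstream code; os.Getpid is an
// assumption and error handling is elided):
//
//     p, _ := procfs.NewProc(os.Getpid())
//     status, _ := p.NewStatus()
//     fmt.Printf("%s: VmRSS=%d bytes, context switches=%d\n",
//         status.Name, status.VmRSS, status.TotalCtxtSwitches())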
+func (p Proc) NewStatus() (ProcStatus, error) { + f, err := os.Open(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Name": + s.Name = vString + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go index 61eb6b0e3ce..6661ee03a66 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go @@ -20,6 +20,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" ) // CPUStat shows how much time the cpu spend in various stages. @@ -78,16 +80,6 @@ type Stat struct { SoftIRQ SoftIRQStat } -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} @@ -149,11 +141,31 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { return softIRQStat, total, nil } -// NewStat returns an information about current kernel/system statistics. +// NewStat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) + f, err := os.Open(fs.proc.Path("stat")) if err != nil { return Stat{}, err } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/ttar b/cluster-autoscaler/vendor/github.com/prometheus/procfs/ttar index b0171a12b59..19ef02b8d4e 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/ttar +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/ttar @@ -86,8 +86,10 @@ Usage: $bname [-C ] -c -f (create archive) $bname [-C ] -x -f (extract archive) Options: - -C (change directory) - -v (verbose) + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -111,8 +113,9 @@ function set_cmd { } unset VERBOSE +unset RECURSIVE_UNLINK -while getopts :cf:htxvC: opt; do +while getopts :cf:-:htxvC: opt; do case $opt in c) set_cmd "create" @@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do C) CDIR=$OPTARG ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo @@ -212,16 +227,16 @@ function extract { local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line + # An EOF not preceded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash + # Replace NULLBYTE with null byte unless preceded by backslash # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash + # Remove EOF unless preceded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" @@ -245,7 +260,16 @@ function extract { fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then @@ -338,8 +362,8 @@ function _create { else < "$file" \ sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then diff --git 
a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfrm.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfrm.go index 8f1508f0fd1..30aa417d530 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfrm.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfrm.go @@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) { // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/parse.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index 2bc0ef3427d..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. - fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := util.ParseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. 
- us, err := util.ParseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. -func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. -func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. -func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/xfs.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/xfs.go deleted file mode 100644 index d86794b7ca9..00000000000 --- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/xfs/xfs.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package xfs provides access to statistics exposed by the XFS filesystem. -package xfs - -// Stats contains XFS filesystem runtime statistics, parsed from -// /proc/fs/xfs/stat. -// -// The names and meanings of each statistic were taken from -// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux -// kernel source. Most counters are uint32s (same data types used in -// xfs_stats.h), but some of the "extended precision stats" are uint64s. -type Stats struct { - // The name of the filesystem used to source these statistics. - // If empty, this indicates aggregated statistics for all XFS - // filesystems on the host. - Name string - - ExtentAllocation ExtentAllocationStats - AllocationBTree BTreeStats - BlockMapping BlockMappingStats - BlockMapBTree BTreeStats - DirectoryOperation DirectoryOperationStats - Transaction TransactionStats - InodeOperation InodeOperationStats - LogOperation LogOperationStats - ReadWrite ReadWriteStats - AttributeOperation AttributeOperationStats - InodeClustering InodeClusteringStats - Vnode VnodeStats - Buffer BufferStats - ExtendedPrecision ExtendedPrecisionStats -} - -// ExtentAllocationStats contains statistics regarding XFS extent allocations. -type ExtentAllocationStats struct { - ExtentsAllocated uint32 - BlocksAllocated uint32 - ExtentsFreed uint32 - BlocksFreed uint32 -} - -// BTreeStats contains statistics regarding an XFS internal B-tree. -type BTreeStats struct { - Lookups uint32 - Compares uint32 - RecordsInserted uint32 - RecordsDeleted uint32 -} - -// BlockMappingStats contains statistics regarding XFS block maps. -type BlockMappingStats struct { - Reads uint32 - Writes uint32 - Unmaps uint32 - ExtentListInsertions uint32 - ExtentListDeletions uint32 - ExtentListLookups uint32 - ExtentListCompares uint32 -} - -// DirectoryOperationStats contains statistics regarding XFS directory entries. -type DirectoryOperationStats struct { - Lookups uint32 - Creates uint32 - Removes uint32 - Getdents uint32 -} - -// TransactionStats contains statistics regarding XFS metadata transactions. -type TransactionStats struct { - Sync uint32 - Async uint32 - Empty uint32 -} - -// InodeOperationStats contains statistics regarding XFS inode operations. 
-type InodeOperationStats struct { - Attempts uint32 - Found uint32 - Recycle uint32 - Missed uint32 - Duplicate uint32 - Reclaims uint32 - AttributeChange uint32 -} - -// LogOperationStats contains statistics regarding the XFS log buffer. -type LogOperationStats struct { - Writes uint32 - Blocks uint32 - NoInternalBuffers uint32 - Force uint32 - ForceSleep uint32 -} - -// ReadWriteStats contains statistics regarding the number of read and write -// system calls for XFS filesystems. -type ReadWriteStats struct { - Read uint32 - Write uint32 -} - -// AttributeOperationStats contains statistics regarding manipulation of -// XFS extended file attributes. -type AttributeOperationStats struct { - Get uint32 - Set uint32 - Remove uint32 - List uint32 -} - -// InodeClusteringStats contains statistics regarding XFS inode clustering -// operations. -type InodeClusteringStats struct { - Iflush uint32 - Flush uint32 - FlushInode uint32 -} - -// VnodeStats contains statistics regarding XFS vnode operations. -type VnodeStats struct { - Active uint32 - Allocate uint32 - Get uint32 - Hold uint32 - Release uint32 - Reclaim uint32 - Remove uint32 - Free uint32 -} - -// BufferStats contains statistics regarding XFS read/write I/O buffers. -type BufferStats struct { - Get uint32 - Create uint32 - GetLocked uint32 - GetLockedWaited uint32 - BusyLocked uint32 - MissLocked uint32 - PageRetries uint32 - PageFound uint32 - GetRead uint32 -} - -// ExtendedPrecisionStats contains high precision counters used to track the -// total number of bytes read, written, or flushed, during XFS operations. -type ExtendedPrecisionStats struct { - FlushBytes uint64 - WriteBytes uint64 - ReadBytes uint64 -} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/.travis.yml b/cluster-autoscaler/vendor/github.com/spf13/pflag/.travis.yml index f8a63b308ba..00d04cb9b02 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/.travis.yml @@ -3,8 +3,9 @@ sudo: false language: go go: - - 1.7.3 - - 1.8.1 + - 1.9.x + - 1.10.x + - 1.11.x - tip matrix: @@ -12,7 +13,7 @@ matrix: - go: tip install: - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint - export PATH=$GOPATH/bin:$PATH - go install ./... diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/README.md b/cluster-autoscaler/vendor/github.com/spf13/pflag/README.md index b052414d129..7eacc5bdbe5 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/README.md +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. 
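To make the helper behaviour described above concrete, here is a small self-contained sketch (not part of this patch; the FlagSet name and argument list are illustrative) showing GetInt succeeding where GetString fails:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Build a standalone FlagSet so the example does not touch CommandLine.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Int("flagname", 1234, "help message")

	// Parse a synthetic argument list; a real program would pass os.Args[1:].
	if err := fs.Parse([]string{"--flagname=7"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// "flagname" exists and is an int, so GetInt succeeds...
	if v, err := fs.GetInt("flagname"); err == nil {
		fmt.Println("flagname =", v) // flagname = 7
	}

	// ...while asking for it as a string returns an error.
	if _, err := fs.GetString("flagname"); err != nil {
		fmt.Println("GetString error:", err)
	}
}
```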
diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/bool_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a75a..3731370d6a5 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/bool_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/count.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/count.go index aa126e44d1c..a0b2679f71c 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/count.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/duration_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc104..badadda53fd 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/duration_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/flag.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/flag.go index 9beeda8ecca..24a5036e95b 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/flag.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/float32_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 00000000000..caa352741a6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
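// For illustration only (not upstream code), the package-level helpers declared
// later in this file could be used roughly as follows; the flag name, shorthand
// and default value are arbitrary examples:
//
//     speeds := pflag.Float32SliceP("speeds", "s", []float32{1.0, 2.5}, "list of speeds")
//     pflag.Parse()
//     fmt.Println(*speeds)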
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/float64_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 00000000000..85bf3073d50 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/go.mod b/cluster-autoscaler/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 00000000000..b2287eec134 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/go.sum b/cluster-autoscaler/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/int32_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 00000000000..ff128ff06d8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
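Because the int32 slice parser calls strconv.ParseInt with base 0, values may use Go literal prefixes. A small sketch under that assumption follows; the "ids" flag and inputs are illustrative only, and the diff resumes right after it.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ids := fs.Int32Slice("ids", nil, "comma-separated int32 values (decimal, hex, or octal)")
	// Base 0 lets strconv pick the base from the prefix: 0x10 -> 16, 010 -> 8.
	if err := fs.Parse([]string{"--ids=10,0x10,010"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*ids) // [10 16 8]
}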
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/int64_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 00000000000..25464638f3a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
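For the shorthand variants defined next, a hedged sketch using a made-up "limits" flag together with the GetInt64Slice accessor added earlier in this file.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	// Shorthand -l; the default is kept only if the flag is never set.
	fs.Int64SliceP("limits", "l", []int64{100}, "per-tenant limits")
	if err := fs.Parse([]string{"-l", "200,300"}); err != nil {
		fmt.Println(err)
		return
	}
	limits, err := fs.GetInt64Slice("limits")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(limits) // [200 300]
}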
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/int_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edde95..e71c39d91aa 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/int_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/ip_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe3fb..775faae4fd8 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/ip_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/string_array.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_array.go index fa7bc60187a..4894af81802 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/string_array.go +++ 
b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/string_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc083e..3cb2e69dba0 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/string_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
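To make the StringSlice versus StringArray comparison in the comment above concrete, a short sketch; the flag names are illustrative, and the remainder of the doc comment continues below.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ss := fs.StringSlice("ss", nil, "comma-split values")
	sa := fs.StringArray("sa", nil, "verbatim values")
	_ = fs.Parse([]string{"--ss=v1,v2", "--ss=v3", "--sa=v1,v2", "--sa=v3"})
	fmt.Println(*ss) // [v1 v2 v3]   (each occurrence is split on commas)
	fmt.Println(*sa) // [v1,v2 v3]   (each occurrence is kept as one element)
}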
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/string_to_int64.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 00000000000..a807a04a0ba --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. 
+// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
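A hedged sketch of the new stringToInt64 map flag before its remaining constructors below; the "quota" flag and values are made up. Repeated occurrences merge their key=value pairs into the same map.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	quotas := fs.StringToInt64("quota", nil, "per-user quotas as name=value")
	// The second occurrence merges into the existing map, overwriting "bob".
	if err := fs.Parse([]string{"--quota=alice=10,bob=20", "--quota=bob=30"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println((*quotas)["alice"], (*quotas)["bob"]) // 10 30
}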
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/cluster-autoscaler/vendor/github.com/spf13/pflag/uint_slice.go b/cluster-autoscaler/vendor/github.com/spf13/pflag/uint_slice.go index edd94c600af..5fa924835ed 100644 --- a/cluster-autoscaler/vendor/github.com/spf13/pflag/uint_slice.go +++ b/cluster-autoscaler/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go index aa1c2b95cdd..e0364e9e7f6 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -113,6 +113,17 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Exactlyf asserts that two objects are equal in value and type. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) @@ -157,6 +168,31 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool return FileExists(t, path, append([]interface{}{msg}, args...)...) } +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) 
+} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // @@ -289,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // @@ -300,6 +344,31 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf return Len(t, object, length, append([]interface{}{msg}, args...)...) } +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Less(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") @@ -444,6 +513,19 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) } +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Same(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
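To show how the comparison and sameness assertions added in this patch read in practice, a minimal test sketch; the test name and values are illustrative, and the assertion_format.go diff continues below.

package demo_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderingAssertions(t *testing.T) {
	// Both operands must have the same kind for the ordering helpers.
	assert.Greater(t, int64(7), int64(3))
	assert.LessOrEqual(t, "alpha", "beta")
	assert.GreaterOrEqualf(t, 2, 2, "expected %d >= %d", 2, 2)

	// Same passes only when both arguments are the identical pointer.
	p := &struct{ n int }{n: 1}
	assert.Same(t, p, p)
}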
// diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go index de39f794e72..26830403a9b 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -215,6 +215,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { return Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) @@ -303,6 +325,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b return FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqualf(a.t, e1, e2, msg, args...) 
+} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -567,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. return JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -589,6 +677,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in return Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -877,6 +1015,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . return Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. 
+// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_order.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 00000000000..15a486ca6e2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,309 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return -1, true + } + if intobj1 == intobj2 { + return 0, true + } + if intobj1 < intobj2 { + return 1, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return -1, true + } + if int8obj1 == int8obj2 { + return 0, true + } + if int8obj1 < int8obj2 { + return 1, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return -1, true + } + if int16obj1 == int16obj2 { + return 0, true + } + if int16obj1 < int16obj2 { + return 1, true + } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return -1, true + } + if int32obj1 == int32obj2 { + return 0, true + } + if int32obj1 < int32obj2 { + return 1, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return -1, true + } + if int64obj1 == int64obj2 { + return 0, true + } + if int64obj1 < int64obj2 { + return 1, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return -1, true + } + if uintobj1 == uintobj2 { + return 0, true + } + if uintobj1 < uintobj2 { + return 1, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return -1, true + } + if uint8obj1 == uint8obj2 { + return 0, true + } + if uint8obj1 < uint8obj2 { + return 1, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return -1, true + } + if uint16obj1 == uint16obj2 { + return 0, true + } + if uint16obj1 < uint16obj2 { + return 1, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return -1, true + } + if uint32obj1 == uint32obj2 { + return 0, true + } + if 
uint32obj1 < uint32obj2 { + return 1, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return -1, true + } + if uint64obj1 == uint64obj2 { + return 0, true + } + if uint64obj1 < uint64obj2 { + return 1, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return -1, true + } + if float32obj1 == float32obj2 { + return 0, true + } + if float32obj1 < float32obj2 { + return 1, true + } + } + case reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return -1, true + } + if float64obj1 == float64obj2 { + return 0, true + } + if float64obj1 < float64obj2 { + return 1, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return -1, true + } + if stringobj1 == stringobj2 { + return 0, true + } + if stringobj1 < stringobj2 { + return 1, true + } + } + } + + return 0, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) 
+ } + + return true +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go index 9bd4a80e484..044da8b01f2 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go @@ -18,6 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" + yaml "gopkg.in/yaml.v2" ) //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl @@ -350,6 +351,37 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) + if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { + return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) + } + + expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) + if expectedType != actualType { + return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", + expectedType, actualType), msgAndArgs...) + } + + if expected != actual { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + } + + return true +} + // formatUnequalValues takes two values of arbitrary types and returns string // representations appropriate to be presented to the user. 
// @@ -479,14 +511,14 @@ func isEmpty(object interface{}) bool { // collection types are empty when they have no element case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -629,7 +661,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ func includeElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) + listKind := reflect.TypeOf(list).Kind() defer func() { if e := recover(); e != nil { ok = false @@ -637,11 +669,12 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { } }() - if reflect.TypeOf(list).Kind() == reflect.String { + if listKind == reflect.String { + elementValue := reflect.ValueOf(element) return true, strings.Contains(listValue.String(), elementValue.String()) } - if reflect.TypeOf(list).Kind() == reflect.Map { + if listKind == reflect.Map { mapKeys := listValue.MapKeys() for i := 0; i < len(mapKeys); i++ { if ObjectsAreEqual(mapKeys[i].Interface(), element) { @@ -1337,6 +1370,24 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedYAMLAsInterface, actualYAMLAsInterface interface{} + + if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) +} + func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { t := reflect.TypeOf(v) k := t.Kind() @@ -1371,8 +1422,8 @@ func diff(expected interface{}, actual interface{}) string { e = spewConfig.Sdump(expected) a = spewConfig.Sdump(actual) } else { - e = expected.(string) - a = actual.(string) + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1414,3 +1465,34 @@ var spewConfig = spew.ConfigState{ type tHelper interface { Helper() } + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
+// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + timer := time.NewTimer(waitFor) + ticker := time.NewTicker(tick) + checkPassed := make(chan bool) + defer timer.Stop() + defer ticker.Stop() + defer close(checkPassed) + for { + select { + case <-timer.C: + return Fail(t, "Condition never satisfied", msgAndArgs...) + case result := <-checkPassed: + if result { + return true + } + case <-ticker.C: + go func() { + checkPassed <- condition() + }() + } + } +} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go index d6694ed78a1..b5288af5b7c 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go @@ -262,17 +262,21 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { // */ func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { + var expectedCall *Call + for i, call := range m.ExpectedCalls { + if call.Method == method { _, diffCount := call.Arguments.Diff(arguments) if diffCount == 0 { - return i, call + expectedCall = call + if call.Repeatability > -1 { + return i, call + } } - } } - return -1, nil + + return -1, expectedCall } func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { @@ -344,13 +348,17 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { + // expected call found but it has already been called with repeatable times + if call != nil { + m.mutex.Unlock() + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } // we have to fail here - because we don't know what to do // as the return arguments. This is because: // // a) this is a totally unexpected call to this method, // b) the arguments are not what was expected, or // c) the developer has forgotten to add an accompanying On...Return pair. - closestCall, mismatch := m.findClosestCall(methodName, arguments...) m.mutex.Unlock() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go index 535f293490c..c5903f5dbb8 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go @@ -14,23 +14,23 @@ import ( // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if assert.Condition(t, comp, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Condition(t, comp, msgAndArgs...) { + return + } t.FailNow() } // Conditionf uses a Comparison to assert a complex condition. 
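An illustrative use of the Eventually helper added above: it polls the condition every tick and fails if the condition never returns true within waitFor. The done channel and timings are made up for the sketch; the require package diff resumes afterwards.

package demo_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallySeesCompletion(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()
	// Poll every 10ms, give up after one second.
	assert.Eventually(t, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}, time.Second, 10*time.Millisecond)
}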
func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { - if assert.Conditionf(t, comp, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Conditionf(t, comp, msg, args...) { + return + } t.FailNow() } @@ -41,12 +41,12 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // assert.Contains(t, ["Hello", "World"], "World") // assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.Contains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Contains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -57,34 +57,34 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") // assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.Containsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Containsf(t, s, contains, msg, args...) { + return + } t.FailNow() } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.DirExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { - if assert.DirExistsf(t, path, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExistsf(t, path, msg, args...) { + return + } t.FailNow() } @@ -94,12 +94,12 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // // assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { - if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + return + } t.FailNow() } @@ -109,12 +109,12 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // // assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { - if assert.ElementsMatchf(t, listA, listB, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatchf(t, listA, listB, msg, args...) { + return + } t.FailNow() } @@ -123,12 +123,12 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Empty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Empty(t, object, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -137,12 +137,12 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Emptyf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Emptyf(t, object, msg, args...) { + return + } t.FailNow() } @@ -154,12 +154,12 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Equal(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equal(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -169,12 +169,12 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // actualObj, err := SomeFunction() // assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if assert.EqualError(t, theError, errString, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualError(t, theError, errString, msgAndArgs...) { + return + } t.FailNow() } @@ -184,12 +184,12 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // actualObj, err := SomeFunction() // assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { - if assert.EqualErrorf(t, theError, errString, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualErrorf(t, theError, errString, msg, args...) { + return + } t.FailNow() } @@ -198,12 +198,12 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.EqualValues(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValues(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -212,12 +212,12 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // // assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.EqualValuesf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValuesf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -229,12 +229,12 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Equalf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equalf(t, expected, actual, msg, args...) 
{ + return + } t.FailNow() } @@ -245,12 +245,12 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.Error(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Error(t, err, msgAndArgs...) { + return + } t.FailNow() } @@ -261,9 +261,37 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Errorf(t, err, msg, args...) { return } + t.FailNow() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } + if h, ok := t.(tHelper); ok { + h.Helper() + } + t.FailNow() +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -274,12 +302,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // // assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Exactly(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactly(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -287,56 +315,56 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Exactlyf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactlyf(t, expected, actual, msg, args...) { + return + } t.FailNow() } // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.Fail(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Fail(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNow fails test func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.FailNow(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNow(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNowf fails test func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.FailNowf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNowf(t, failureMessage, msg, args...) 
{ + return + } t.FailNow() } // Failf reports a failure through func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.Failf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Failf(t, failureMessage, msg, args...) { + return + } t.FailNow() } @@ -344,12 +372,12 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.False(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.False(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -357,34 +385,96 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Falsef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Falsef(t, value, msg, args...) { + return + } t.FailNow() } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.FileExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FileExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.FileExistsf(t, path, msg, args...) { return } + t.FailNow() +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greater(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqualf(t, e1, e2, msg, args...) 
{ + return + } + t.FailNow() +} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Greaterf(t, e1, e2, msg, args...) { + return + } t.FailNow() } @@ -395,12 +485,12 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -411,12 +501,12 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -427,12 +517,12 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -443,12 +533,12 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -458,12 +548,12 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -473,12 +563,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -488,12 +578,12 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -503,12 +593,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -518,12 +608,12 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -533,12 +623,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -546,12 +636,12 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.Implements(t, interfaceObject, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implements(t, interfaceObject, object, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -559,12 +649,12 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // // assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { - if assert.Implementsf(t, interfaceObject, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implementsf(t, interfaceObject, object, msg, args...) { + return + } t.FailNow() } @@ -572,56 +662,56 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // InDeltaSlice is the same as InDelta, except it compares two slices. func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaSlicef is the same as InDelta, except it compares two slices. func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } @@ -629,132 +719,216 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // // assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaf(t, expected, actual, delta, msg, args...) 
{ + return + } t.FailNow() } // InEpsilon asserts that expected and actual have a relative error less than epsilon func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.IsType(t, expectedType, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } t.FailNow() } // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.IsTypef(t, expectedType, object, msg, args...) { return } + t.FailNow() +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Len(t, object, length, msgAndArgs...) { + return + } + t.FailNow() +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lenf(t, object, length, msg, args...) { + return + } t.FailNow() } -// JSONEq asserts that two JSON strings are equivalent. +// Less asserts that the first element is less than the second // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if assert.JSONEq(t, expected, actual, msgAndArgs...) { - return - } +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Less(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// JSONEqf asserts that two JSON strings are equivalent. +// LessOrEqual asserts that the first element is less than or equal to the second // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { - if assert.JSONEqf(t, expected, actual, msg, args...) { - return - } +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqual(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. +// LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if assert.Len(t, object, length, msgAndArgs...) { - return - } +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqualf(t, e1, e2, msg, args...) { + return + } t.FailNow() } -// Lenf asserts that the specified object has specific length. 
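For illustration (outside the vendored diff itself): a short, hypothetical test exercising two of the require helpers added in this bump, YAMLEq and Less, again assuming testify v1.4.x; the document contents and names are illustrative only.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical test: YAMLEq compares documents structurally,
// Less enforces an ordering on comparable values.
func TestNewRequireHelpers(t *testing.T) {
	expected := "replicas: 3\nimage: nginx\n"
	actual := "image: \"nginx\"\nreplicas: 3\n"
	require.YAMLEq(t, expected, actual) // key order and quoting do not matter

	require.Less(t, 1, 2)     // ints compare numerically
	require.Less(t, "a", "b") // strings compare lexicographically
}

YAMLEq unmarshals both strings and compares the resulting values, so formatting differences such as key order or quoting do not cause a failure.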
-// Lenf also fails if the object has a type that len() not accept. +// Lessf asserts that the first element is less than the second // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { - if assert.Lenf(t, object, length, msg, args...) { - return - } +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lessf(t, e1, e2, msg, args...) { + return + } t.FailNow() } @@ -762,12 +936,12 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Nil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nil(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -775,12 +949,12 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Nilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -791,12 +965,12 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.NoError(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoError(t, err, msgAndArgs...) { + return + } t.FailNow() } @@ -807,12 +981,12 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { - if assert.NoErrorf(t, err, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoErrorf(t, err, msg, args...) { + return + } t.FailNow() } @@ -823,12 +997,12 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { // assert.NotContains(t, ["Hello", "World"], "Earth") // assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.NotContains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -839,12 +1013,12 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") // assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.NotContainsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContainsf(t, s, contains, msg, args...) 
{ + return + } t.FailNow() } @@ -855,12 +1029,12 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotEmpty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmpty(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -871,12 +1045,12 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // assert.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotEmptyf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmptyf(t, object, msg, args...) { + return + } t.FailNow() } @@ -887,12 +1061,12 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.NotEqual(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqual(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -903,12 +1077,12 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.NotEqualf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqualf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -916,12 +1090,12 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotNil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNil(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -929,12 +1103,12 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotNilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -942,12 +1116,12 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // // assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.NotPanics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -955,12 +1129,12 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.NotPanicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanicsf(t, f, msg, args...) 
{ + return + } t.FailNow() } @@ -969,12 +1143,12 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.NotRegexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexp(t, rx, str, msgAndArgs...) { + return + } t.FailNow() } @@ -983,12 +1157,12 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { - if assert.NotRegexpf(t, rx, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexpf(t, rx, str, msg, args...) { + return + } t.FailNow() } @@ -997,12 +1171,12 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // // assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.NotSubset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubset(t, list, subset, msgAndArgs...) { + return + } t.FailNow() } @@ -1011,34 +1185,34 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.NotSubsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubsetf(t, list, subset, msg, args...) { + return + } t.FailNow() } // NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.NotZero(t, i, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZero(t, i, msgAndArgs...) { + return + } t.FailNow() } // NotZerof asserts that i is not the zero value for its type. func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.NotZerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZerof(t, i, msg, args...) { + return + } t.FailNow() } @@ -1046,12 +1220,12 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // // assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.Panics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1060,12 +1234,12 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.PanicsWithValue(t, expected, f, msgAndArgs...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1074,12 +1248,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // // assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.PanicsWithValuef(t, expected, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValuef(t, expected, f, msg, args...) { + return + } t.FailNow() } @@ -1087,12 +1261,12 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // // assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.Panicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panicsf(t, f, msg, args...) { + return + } t.FailNow() } @@ -1101,12 +1275,12 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.Regexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Regexp(t, rx, str, msgAndArgs...) { + return + } t.FailNow() } @@ -1115,12 +1289,44 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Regexpf(t, rx, str, msg, args...) { return } + t.FailNow() +} + +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Same(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Samef(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -1129,12 +1335,12 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // // assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.Subset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subset(t, list, subset, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -1143,12 +1349,12 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // // assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.Subsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subsetf(t, list, subset, msg, args...) { + return + } t.FailNow() } @@ -1156,12 +1362,12 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.True(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.True(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -1169,12 +1375,12 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Truef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Truef(t, value, msg, args...) { + return + } t.FailNow() } @@ -1182,12 +1388,12 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } @@ -1195,33 +1401,33 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // // assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { - if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.Zero(t, i, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zero(t, i, msgAndArgs...) { + return + } t.FailNow() } // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.Zerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zerof(t, i, msg, args...) 
{ + return + } t.FailNow() } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go.tmpl b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go.tmpl index 6ffc751b5e5..55e42ddebdc 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,6 +1,6 @@ {{.Comment}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { - if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } t.FailNow() } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go index 9fe41dbdc0c..804fae035bc 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go @@ -216,6 +216,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) @@ -304,6 +326,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqual(a.t, e1, e2, msgAndArgs...) 
+} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -568,6 +640,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -590,6 +678,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqualf(a.t, e1, e2, msg, args...) 
+} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -878,6 +1016,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // diff --git a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability.go b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability.go index c07c55794de..61a90775e59 100644 --- a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability.go +++ b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability.go @@ -60,13 +60,74 @@ type Capabilities interface { Apply(kind CapType) error } -// NewPid create new initialized Capabilities object for given pid when it -// is nonzero, or for the current pid if pid is 0 +// NewPid initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. +// +// Deprecated: Replace with NewPid2. For example, replace: +// +// c, err := NewPid(0) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewPid2(0) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } func NewPid(pid int) (Capabilities, error) { + c, err := newPid(pid) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewPid2 initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. This +// does not load the process's current capabilities; to do that you +// must call Load explicitly. +func NewPid2(pid int) (Capabilities, error) { return newPid(pid) } -// NewFile create new initialized Capabilities object for given named file. -func NewFile(name string) (Capabilities, error) { - return newFile(name) +// NewFile initializes a new Capabilities object for given file path. +// +// Deprecated: Replace with NewFile2. 
For example, replace: +// +// c, err := NewFile(path) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewFile2(path) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewFile(path string) (Capabilities, error) { + c, err := newFile(path) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewFile2 creates a new initialized Capabilities object for given +// file path. This does not load the process's current capabilities; +// to do that you must call Load explicitly. +func NewFile2(path string) (Capabilities, error) { + return newFile(path) } diff --git a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability_linux.go index 6d2135ac585..1567dc81040 100644 --- a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability_linux.go +++ b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/capability_linux.go @@ -103,21 +103,17 @@ func newPid(pid int) (c Capabilities, err error) { case linuxCapVer1: p := new(capsV1) p.hdr.version = capVers - p.hdr.pid = pid + p.hdr.pid = int32(pid) c = p case linuxCapVer2, linuxCapVer3: p := new(capsV3) p.hdr.version = capVers - p.hdr.pid = pid + p.hdr.pid = int32(pid) c = p default: err = errUnknownVers return } - err = c.Load() - if err != nil { - c = nil - } return } @@ -428,11 +424,11 @@ func (c *capsV3) Load() (err error) { } if strings.HasPrefix(line, "CapB") { fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) - break + continue } if strings.HasPrefix(line, "CapA") { fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) - break + continue } } f.Close() @@ -492,10 +488,6 @@ func (c *capsV3) Apply(kind CapType) (err error) { func newFile(path string) (c Capabilities, err error) { c = &capsFile{path: path} - err = c.Load() - if err != nil { - c = nil - } return } diff --git a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go index eb7170083a3..3d2bf6927f3 100644 --- a/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go +++ b/cluster-autoscaler/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go @@ -13,7 +13,7 @@ import ( type capHeader struct { version uint32 - pid int + pid int32 } type capData struct { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/CHANGELOG.md new file mode 100644 index 00000000000..b11e59ff6a8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +## 1.0.0 (2018-03-15) + +Initial release tagging \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile index 6c8413b13a5..a0e68e7a9aa 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile @@ -3,7 +3,8 @@ DIRS := \ nl DEPS = \ - github.com/vishvananda/netns + github.com/vishvananda/netns \ + golang.org/x/sys/unix uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) @@ -18,7 
+19,7 @@ $(call goroot,$(DEPS)): .PHONY: $(call testdirs,$(DIRS)) $(call testdirs,$(DIRS)): - sudo -E go test -test.parallel 4 -timeout 60s -v github.com/vishvananda/netlink/$@ + go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@ $(call fmt,$(call testdirs,$(DIRS))): ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q . diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md index 0b61be217e0..a88e2f41840 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md @@ -89,3 +89,4 @@ There are also a few pieces of low level netlink functionality that still need to be implemented. Routing rules are not in place and some of the more advanced link types. Hopefully there is decent structure and testing in place to make these fairly straightforward to add. + diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go index 8808b42d9b6..d59c3281ec7 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -8,6 +8,7 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // IFA_FLAGS is a u32 attribute. @@ -22,7 +23,7 @@ func AddrAdd(link Link, addr *Addr) error { // AddrAdd will add an IP address to a link device. // Equivalent to: `ip addr add $addr dev $link` func (h *Handle) AddrAdd(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -35,7 +36,7 @@ func AddrReplace(link Link, addr *Addr) error { // AddrReplace will replace (or, if not present, add) an IP address on a link device. // Equivalent to: `ip addr replace $addr dev $link` func (h *Handle) AddrReplace(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -48,7 +49,7 @@ func AddrDel(link Link, addr *Addr) error { // AddrDel will delete an IP address from a link device. 
// Equivalent to: `ip addr del $addr dev $link` func (h *Handle) AddrDel(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -75,7 +76,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error localAddrData = addr.IP.To16() } - localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData) + localData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData) req.AddData(localData) var peerAddrData []byte if addr.Peer != nil { @@ -88,7 +89,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error peerAddrData = localAddrData } - addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData) + addressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData) req.AddData(addressData) if addr.Flags != 0 { @@ -102,21 +103,34 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } } - if addr.Broadcast == nil { - calcBroadcast := make(net.IP, masklen/8) - for i := range localAddrData { - calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i] + if family == FAMILY_V4 { + if addr.Broadcast == nil { + calcBroadcast := make(net.IP, masklen/8) + for i := range localAddrData { + calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i] + } + addr.Broadcast = calcBroadcast + } + req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + + if addr.Label != "" { + labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + req.AddData(labelData) } - addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast)) - if addr.Label != "" { - labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) - req.AddData(labelData) + // 0 is the default value for these attributes. However, 0 means "expired", while the least-surprising default + // value should be "forever". To compensate for that, only add the attributes if at least one of the values is + // non-zero, which means the caller has explicitly set them + if addr.ValidLft > 0 || addr.PreferedLft > 0 { + cachedata := nl.IfaCacheInfo{ + IfaValid: uint32(addr.ValidLft), + IfaPrefered: uint32(addr.PreferedLft), + } + req.AddData(nl.NewRtAttr(unix.IFA_CACHEINFO, cachedata.Serialize())) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -131,11 +145,11 @@ func AddrList(link Link, family int) ([]Addr, error) { // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. 
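As a side note on the lifetime handling above: the new IFA_CACHEINFO attribute is only serialized when ValidLft or PreferedLft is non-zero, so existing callers keep the kernel's "forever" default. A minimal usage sketch, assuming the package's exported LinkByName/AddrAdd helpers and a hypothetical "eth0" device (the device name and lifetime values are illustrative, not part of this patch):

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device name
	if err != nil {
		log.Fatal(err)
	}
	ip, ipNet, err := net.ParseCIDR("192.0.2.10/24")
	if err != nil {
		log.Fatal(err)
	}
	ipNet.IP = ip
	addr := &netlink.Addr{
		IPNet:       ipNet,
		ValidLft:    3600, // seconds; leaving both lifetimes at 0 keeps the "forever" default
		PreferedLft: 1800,
	}
	if err := netlink.AddrAdd(link, addr); err != nil {
		log.Fatal(err)
	}
}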
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { - req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) if err != nil { return nil, err } @@ -187,21 +201,21 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { var local, dst *net.IPNet for _, attr := range attrs { switch attr.Attr.Type { - case syscall.IFA_ADDRESS: + case unix.IFA_ADDRESS: dst = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.Peer = dst - case syscall.IFA_LOCAL: + case unix.IFA_LOCAL: local = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.IPNet = local - case syscall.IFA_BROADCAST: + case unix.IFA_BROADCAST: addr.Broadcast = attr.Value - case syscall.IFA_LABEL: + case unix.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) case IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) @@ -236,13 +250,13 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false) } // AddrSubscribeOptions contains a set of options to use with @@ -250,6 +264,7 @@ func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct type AddrSubscribeOptions struct { Namespace *netns.NsHandle ErrorCallback func(error) + ListExisting bool } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -260,11 +275,11 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR) +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err } @@ -274,6 +289,15 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c s.Close() }() } + if listExisting { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, + unix.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(infmsg) + if err := s.Send(req); err != nil { + return err + } + } go func() { defer close(ch) for { @@ -285,8 +309,22 @@ func 
addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c return } for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + native := nl.NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(syscall.Errno(-error)) + } + return + } msgType := m.Header.Type - if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR { + if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { if cberr != nil { cberr(fmt.Errorf("bad message type: %d", msgType)) } @@ -303,7 +341,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c ch <- AddrUpdate{LinkAddress: *addr.IPNet, LinkIndex: ifindex, - NewAddr: msgType == syscall.RTM_NEWADDR, + NewAddr: msgType == unix.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, PreferedLft: addr.PreferedLft, diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bpf_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bpf_linux.go index 533743987a9..6631626bfc0 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bpf_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bpf_linux.go @@ -1,49 +1,12 @@ package netlink -/* -#include -#include -#include -#include -#include -#include +import ( + "unsafe" -static int load_simple_bpf(int prog_type, int ret) { -#ifdef __NR_bpf - // { return ret; } - __u64 __attribute__((aligned(8))) insns[] = { - 0x00000000000000b7ull | ((__u64)ret<<32), - 0x0000000000000095ull, - }; - __u8 __attribute__((aligned(8))) license[] = "ASL2"; - // Copied from a header file since libc is notoriously slow to update. - // The call will succeed or fail and that will be our indication on - // whether or not it is supported. - struct { - __u32 prog_type; - __u32 insn_cnt; - __u64 insns; - __u64 license; - __u32 log_level; - __u32 log_size; - __u64 log_buf; - __u32 kern_version; - } __attribute__((aligned(8))) attr = { - .prog_type = prog_type, - .insn_cnt = 2, - .insns = (uintptr_t)&insns, - .license = (uintptr_t)&license, - }; - return syscall(__NR_bpf, 5, &attr, sizeof(attr)); -#else - errno = EINVAL; - return -1; -#endif -} -*/ -import "C" + "golang.org/x/sys/unix" +) -type BpfProgType C.int +type BpfProgType uint32 const ( BPF_PROG_TYPE_UNSPEC BpfProgType = iota @@ -55,8 +18,36 @@ const ( BPF_PROG_TYPE_XDP ) -// loadSimpleBpf loads a trivial bpf program for testing purposes -func loadSimpleBpf(progType BpfProgType, ret int) (int, error) { - fd, err := C.load_simple_bpf(C.int(progType), C.int(ret)) - return int(fd), err +type BPFAttr struct { + ProgType uint32 + InsnCnt uint32 + Insns uintptr + License uintptr + LogLevel uint32 + LogSize uint32 + LogBuf uintptr + KernVersion uint32 +} + +// loadSimpleBpf loads a trivial bpf program for testing purposes. 
+func loadSimpleBpf(progType BpfProgType, ret uint32) (int, error) { + insns := []uint64{ + 0x00000000000000b7 | (uint64(ret) << 32), + 0x0000000000000095, + } + license := []byte{'A', 'S', 'L', '2', '\x00'} + attr := BPFAttr{ + ProgType: uint32(progType), + InsnCnt: uint32(len(insns)), + Insns: uintptr(unsafe.Pointer(&insns[0])), + License: uintptr(unsafe.Pointer(&license[0])), + } + fd, _, errno := unix.Syscall(unix.SYS_BPF, + 5, /* bpf cmd */ + uintptr(unsafe.Pointer(&attr)), + unsafe.Sizeof(attr)) + if errno != 0 { + return 0, errno + } + return int(fd), nil } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go index a65d6a1319a..350ab0db4b0 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -2,9 +2,9 @@ package netlink import ( "fmt" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // BridgeVlanList gets a map of device id to bridge vlan infos. @@ -16,12 +16,12 @@ func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) - req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) + req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { } for _, attr := range attrs { switch attr.Attr.Type { - case nl.IFLA_AF_SPEC: + case unix.IFLA_AF_SPEC: //nested attr nestAttrs, err := nl.ParseRouteAttr(attr.Value) if err != nil { @@ -63,7 +63,7 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,19 +75,19 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) } func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK) + req := 
h.newNetlinkRequest(cmd, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) - br := nl.NewRtAttr(nl.IFLA_AF_SPEC, nil) + br := nl.NewRtAttr(unix.IFLA_AF_SPEC, nil) var flags uint16 if self { flags |= nl.BRIDGE_FLAGS_SELF @@ -107,7 +107,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged } nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) req.AddData(br) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go index 91cd3883de9..a4997740e29 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // NOTE: function is in here because it uses other linux functions @@ -50,7 +51,7 @@ func ClassDel(class Class) error { // ClassDel will delete a class from the system. // Equivalent to: `tc class del $class` func (h *Handle) ClassDel(class Class) error { - return h.classModify(syscall.RTM_DELTCLASS, 0, class) + return h.classModify(unix.RTM_DELTCLASS, 0, class) } // ClassChange will change a class in place @@ -64,7 +65,7 @@ func ClassChange(class Class) error { // Equivalent to: `tc class change $class` // The parent and handle MUST NOT be changed. func (h *Handle) ClassChange(class Class) error { - return h.classModify(syscall.RTM_NEWTCLASS, 0, class) + return h.classModify(unix.RTM_NEWTCLASS, 0, class) } // ClassReplace will replace a class to the system. @@ -82,7 +83,7 @@ func ClassReplace(class Class) error { // If a class already exist with this parent/handle pair, the class is changed. // If a class does not already exist with this parent/handle, a new class is created. func (h *Handle) ClassReplace(class Class) error { - return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class) + return h.classModify(unix.RTM_NEWTCLASS, unix.NLM_F_CREATE, class) } // ClassAdd will add a class to the system. 
@@ -95,14 +96,14 @@ func ClassAdd(class Class) error { // Equivalent to: `tc class add $class` func (h *Handle) ClassAdd(class Class) error { return h.classModify( - syscall.RTM_NEWTCLASS, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + unix.RTM_NEWTCLASS, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, class, ) } func (h *Handle) classModify(cmd, flags int, class Class) error { - req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) base := class.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -112,12 +113,12 @@ func (h *Handle) classModify(cmd, flags int, class Class) error { } req.AddData(msg) - if cmd != syscall.RTM_DELTCLASS { + if cmd != unix.RTM_DELTCLASS { if err := classPayload(req, class); err != nil { return err } } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -141,12 +142,12 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { var rtab [256]uint32 var ctab [256]uint32 tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)} - if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcrate, rtab[:], cellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate rate table") } opt.Rate = tcrate tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)} - if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcceil, ctab[:], ccellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate ceil rate table") } opt.Ceil = tcceil @@ -169,7 +170,7 @@ func ClassList(link Link, parent uint32) ([]Class, error) { // Equivalent to: `tc class show`. // Generally returns nothing if link and parent are not specified. func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { - req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -181,7 +182,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go index ecf04456590..a0fc74a3722 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // ConntrackTableType Conntrack table for the netlink operation @@ -85,8 +85,8 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) // conntrack -F [table] Flush table // The flush operation applies to all the family types func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { - req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) - _, err := req.Execute(syscall.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, unix.AF_INET, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + _, err := req.Execute(unix.NETLINK_NETFILTER, 0) return err } @@ -102,10 +102,10 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami for _, dataRaw := range 
res { flow := parseRawData(dataRaw) if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(syscall.NETLINK_NETFILTER, 0) + req2.Execute(unix.NETLINK_NETFILTER, 0) matched++ } } @@ -127,8 +127,8 @@ func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily } func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { - req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP) - return req.Execute(syscall.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, unix.NLM_F_DUMP) + return req.Execute(unix.NETLINK_NETFILTER, 0) } // The full conntrack flow structure is very complicated and can be found in the file: diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go index 1120c79d6a9..c2cf8e4dcf5 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go @@ -17,7 +17,7 @@ type FilterAttrs struct { Handle uint32 Parent uint32 Priority uint16 // lower is higher priority - Protocol uint16 // syscall.ETH_P_* + Protocol uint16 // unix.ETH_P_* } func (q FilterAttrs) String() string { @@ -225,6 +225,21 @@ func (filter *U32) Type() string { return "u32" } +// MatchAll filters match all packets +type MatchAll struct { + FilterAttrs + ClassId uint32 + Actions []Action +} + +func (filter *MatchAll) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *MatchAll) Type() string { + return "matchall" +} + type FilterFwAttrs struct { ClassId uint32 InDev string diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go index 5025bd56c1b..f0eac6b78cc 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -9,6 +9,7 @@ import ( "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // Constants used in TcU32Sel.Flags. @@ -55,7 +56,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.Rate.Rate != 0 { police.Rate.Mpu = fattrs.Mpu police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("TBF: failed to calculate rate table") } police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) @@ -64,7 +65,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.PeakRate.Rate != 0 { police.PeakRate.Mpu = fattrs.Mpu police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("POLICE: failed to calculate peak rate table") } } @@ -98,7 +99,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. 
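The new MatchAll type above plugs into FilterAdd/FilterList like the existing u32 and bpf filters. A minimal sketch, assuming netlink.LinkByName, netlink.MakeHandle and an ingress qdisc already configured on a hypothetical "eth0" (none of which are part of this patch), that classifies all traffic into class 1:1:

package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device name
	if err != nil {
		log.Fatal(err)
	}
	// Roughly `tc filter add dev eth0 parent ffff: matchall classid 1:1`.
	filter := &netlink.MatchAll{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.MakeHandle(0xffff, 0), // assumed ingress qdisc handle
			Priority:  1,
			Protocol:  unix.ETH_P_ALL,
		},
		ClassId: netlink.MakeHandle(1, 1),
	}
	if err := netlink.FilterAdd(filter); err != nil {
		log.Fatal(err)
	}
}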
// Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -109,7 +110,7 @@ func (h *Handle) FilterDel(filter Filter) error { } req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -123,7 +124,7 @@ func FilterAdd(filter Filter) error { // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -221,10 +222,18 @@ func (h *Handle) FilterAdd(filter Filter) error { bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT } nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags)) + case *MatchAll: + actionsAttr := nl.NewRtAttrChild(options, nl.TCA_MATCHALL_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } + if filter.ClassId != 0 { + nl.NewRtAttrChild(options, nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId)) + } } req.AddData(options) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -239,7 +248,7 @@ func FilterList(link Link, parent uint32) ([]Filter, error) { // Equivalent to: `tc filter show`. // Generally returns nothing if link and parent are not specified. func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { - req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -251,7 +260,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) if err != nil { return nil, err } @@ -287,6 +296,8 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { filter = &Fw{} case "bpf": filter = &BpfFilter{} + case "matchall": + filter = &MatchAll{} default: filter = &GenericFilter{FilterType: filterType} } @@ -311,6 +322,11 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { if err != nil { return nil, err } + case "matchall": + detailed, err = parseMatchAllData(filter, data) + if err != nil { + return nil, err + } default: detailed = true } @@ -540,6 +556,28 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) return detailed, nil } +func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + matchall := filter.(*MatchAll) + detailed := true + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_MATCHALL_CLASSID: + matchall.ClassId = native.Uint32(datum.Value[0:4]) + case nl.TCA_MATCHALL_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + matchall.Actions, err = parseActions(tables) + if err != nil { + return detailed, err + } + } + } + return detailed, nil +} + func AlignToAtm(size uint) uint { var linksize, cells int cells 
= int(size / nl.ATM_CELL_PAYLOAD) @@ -562,7 +600,7 @@ func AdjustSize(sz uint, mpu uint, linklayer int) uint { } } -func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int { +func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, linklayer int) int { bps := rate.Rate mpu := rate.Mpu var sz uint diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go new file mode 100644 index 00000000000..71e73c37a0a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go @@ -0,0 +1,21 @@ +package netlink + +import ( + "errors" +) + +var ( + // ErrAttrHeaderTruncated is returned when a netlink attribute's header is + // truncated. + ErrAttrHeaderTruncated = errors.New("attribute header truncated") + // ErrAttrBodyTruncated is returned when a netlink attribute's body is + // truncated. + ErrAttrBodyTruncated = errors.New("attribute body truncated") +) + +type Fou struct { + Family int + Port int + Protocol int + EncapType int +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go new file mode 100644 index 00000000000..62d59bd2d09 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go @@ -0,0 +1,215 @@ +// +build linux + +package netlink + +import ( + "encoding/binary" + "errors" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + FOU_GENL_NAME = "fou" +) + +const ( + FOU_CMD_UNSPEC uint8 = iota + FOU_CMD_ADD + FOU_CMD_DEL + FOU_CMD_GET + FOU_CMD_MAX = FOU_CMD_GET +) + +const ( + FOU_ATTR_UNSPEC = iota + FOU_ATTR_PORT + FOU_ATTR_AF + FOU_ATTR_IPPROTO + FOU_ATTR_TYPE + FOU_ATTR_REMCSUM_NOPARTIAL + FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL +) + +const ( + FOU_ENCAP_UNSPEC = iota + FOU_ENCAP_DIRECT + FOU_ENCAP_GUE + FOU_ENCAP_MAX = FOU_ENCAP_GUE +) + +var fouFamilyId int + +func FouFamilyId() (int, error) { + if fouFamilyId != 0 { + return fouFamilyId, nil + } + + fam, err := GenlFamilyGet(FOU_GENL_NAME) + if err != nil { + return -1, err + } + + fouFamilyId = int(fam.ID) + return fouFamilyId, nil +} + +func FouAdd(f Fou) error { + return pkgHandle.FouAdd(f) +} + +func (h *Handle) FouAdd(f Fou) error { + fam_id, err := FouFamilyId() + if err != nil { + return err + } + + // setting ip protocol conflicts with encapsulation type GUE + if f.EncapType == FOU_ENCAP_GUE && f.Protocol != 0 { + return errors.New("GUE encapsulation doesn't specify an IP protocol") + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) + + // int to byte for port + bp := make([]byte, 2) + binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_PORT, bp), + nl.NewRtAttr(FOU_ATTR_TYPE, []byte{uint8(f.EncapType)}), + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), + nl.NewRtAttr(FOU_ATTR_IPPROTO, []byte{uint8(f.Protocol)}), + } + raw := []byte{FOU_CMD_ADD, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) 
+ } + + req.AddRawData(raw) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return err + } + + return nil +} + +func FouDel(f Fou) error { + return pkgHandle.FouDel(f) +} + +func (h *Handle) FouDel(f Fou) error { + fam_id, err := FouFamilyId() + if err != nil { + return err + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) + + // int to byte for port + bp := make([]byte, 2) + binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_PORT, bp), + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), + } + raw := []byte{FOU_CMD_DEL, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) + } + + req.AddRawData(raw) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return err + } + + return nil +} + +func FouList(fam int) ([]Fou, error) { + return pkgHandle.FouList(fam) +} + +func (h *Handle) FouList(fam int) ([]Fou, error) { + fam_id, err := FouFamilyId() + if err != nil { + return nil, err + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_DUMP) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(fam)}), + } + raw := []byte{FOU_CMD_GET, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) + } + + req.AddRawData(raw) + + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + fous := make([]Fou, 0, len(msgs)) + for _, m := range msgs { + f, err := deserializeFouMsg(m) + if err != nil { + return fous, err + } + + fous = append(fous, f) + } + + return fous, nil +} + +func deserializeFouMsg(msg []byte) (Fou, error) { + // we'll skip to byte 4 to first attribute + msg = msg[3:] + var shift int + fou := Fou{} + + for { + // attribute header is at least 16 bits + if len(msg) < 4 { + return fou, ErrAttrHeaderTruncated + } + + lgt := int(binary.BigEndian.Uint16(msg[0:2])) + if len(msg) < lgt+4 { + return fou, ErrAttrBodyTruncated + } + attr := binary.BigEndian.Uint16(msg[2:4]) + + shift = lgt + 3 + switch attr { + case FOU_ATTR_AF: + fou.Family = int(msg[5]) + case FOU_ATTR_PORT: + fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) + // port is 2 bytes + shift = lgt + 2 + case FOU_ATTR_IPPROTO: + fou.Protocol = int(msg[5]) + case FOU_ATTR_TYPE: + fou.EncapType = int(msg[5]) + } + + msg = msg[shift:] + + if len(msg) < 4 { + break + } + } + + return fou, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go new file mode 100644 index 00000000000..3a8365bfe62 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go @@ -0,0 +1,15 @@ +// +build !linux + +package netlink + +func FouAdd(f Fou) error { + return ErrNotImplemented +} + +func FouDel(f Fou) error { + return ErrNotImplemented +} + +func FouList(fam int) ([]Fou, error) { + return nil, ErrNotImplemented +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go index a388a87001c..ce7969907d4 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) type GenlOp struct { @@ -130,9 +131,9 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { Command: nl.GENL_CTRL_CMD_GETFAMILY, 
Version: nl.GENL_CTRL_VERSION, } - req := h.newNetlinkRequest(nl.GENL_ID_CTRL, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -151,7 +152,7 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) { req := h.newNetlinkRequest(nl.GENL_ID_CTRL, 0) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_CTRL_ATTR_FAMILY_NAME, nl.ZeroTerminated(name))) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go index 7331303ecbe..f5e160ba5c0 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -7,6 +7,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) type PDP struct { @@ -82,9 +83,9 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) { Command: nl.GENL_GTP_CMD_GETPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -96,7 +97,7 @@ func GTPPDPList() ([]*PDP, error) { } func gtpPDPGet(req *nl.NetlinkRequest) (*PDP, error) { - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -182,7 +183,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_NEWPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -199,7 +200,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(syscall.NETLINK_GENERIC, 0) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) return err } @@ -216,7 +217,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_DELPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -229,7 +230,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(syscall.NETLINK_GENERIC, 0) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go index d37b087c33e..9f6d7fe0fbd 100644 --- 
a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -2,11 +2,11 @@ package netlink import ( "fmt" - "syscall" "time" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // Empty handle used by the netlink package methods @@ -43,7 +43,7 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { if to < time.Microsecond { return fmt.Errorf("invalid timeout, minimul value is %s", time.Microsecond) } - tv := syscall.NsecToTimeval(to.Nanoseconds()) + tv := unix.NsecToTimeval(to.Nanoseconds()) for _, sh := range h.sockets { if err := sh.Socket.SetSendTimeout(&tv); err != nil { return err @@ -59,13 +59,13 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { // socket in the netlink handle. The maximum value is capped by // /proc/sys/net/core/rmem_max. func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error { - opt := syscall.SO_RCVBUF + opt := unix.SO_RCVBUF if force { - opt = syscall.SO_RCVBUFFORCE + opt = unix.SO_RCVBUFFORCE } for _, sh := range h.sockets { fd := sh.Socket.GetFd() - err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, opt, size) + err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, opt, size) if err != nil { return err } @@ -81,7 +81,7 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { i := 0 for _, sh := range h.sockets { fd := sh.Socket.GetFd() - size, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF) + size, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF) if err != nil { return nil, err } @@ -134,10 +134,10 @@ func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest { return nl.NewNetlinkRequest(proto, flags) } return &nl.NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.SizeofNlMsghdr), + NlMsghdr: unix.NlMsghdr{ + Len: uint32(unix.SizeofNlMsghdr), Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), + Flags: unix.NLM_F_REQUEST | uint16(flags), }, Sockets: h.sockets, } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_unspecified.go index 7da21a6a184..915b765ded7 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -220,3 +220,39 @@ func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { return nil, ErrNotImplemented } + +func (h *Handle) RouteAdd(route *Route) error { + return ErrNotImplemented +} + +func (h *Handle) RouteDel(route *Route) error { + return ErrNotImplemented +} + +func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { + return nil, ErrNotImplemented +} + +func (h *Handle) RouteList(link Link, family int) ([]Route, error) { + return nil, ErrNotImplemented +} + +func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return nil, ErrNotImplemented +} + +func (h *Handle) RouteReplace(route *Route) error { + return ErrNotImplemented +} + +func (h *Handle) RuleAdd(rule *Rule) error { + return ErrNotImplemented +} + +func (h *Handle) RuleDel(rule *Rule) error { + return ErrNotImplemented +} + +func (h *Handle) RuleList(family int) ([]Rule, error) { + return nil, ErrNotImplemented +} diff --git 
a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/ioctl_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/ioctl_linux.go new file mode 100644 index 00000000000..a8503126d56 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/ioctl_linux.go @@ -0,0 +1,98 @@ +package netlink + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// ioctl for statistics. +const ( + // ETHTOOL_GSSET_INFO gets string set info + ETHTOOL_GSSET_INFO = 0x00000037 + // SIOCETHTOOL is Ethtool interface + SIOCETHTOOL = 0x8946 + // ETHTOOL_GSTRINGS gets specified string set + ETHTOOL_GSTRINGS = 0x0000001b + // ETHTOOL_GSTATS gets NIC-specific statistics + ETHTOOL_GSTATS = 0x0000001d +) + +// string set id. +const ( + // ETH_SS_TEST is self-test result names, for use with %ETHTOOL_TEST + ETH_SS_TEST = iota + // ETH_SS_STATS statistic names, for use with %ETHTOOL_GSTATS + ETH_SS_STATS + // ETH_SS_PRIV_FLAGS are driver private flag names + ETH_SS_PRIV_FLAGS + // _ETH_SS_NTUPLE_FILTERS is deprecated + _ETH_SS_NTUPLE_FILTERS + // ETH_SS_FEATURES are device feature names + ETH_SS_FEATURES + // ETH_SS_RSS_HASH_FUNCS is RSS hush function names + ETH_SS_RSS_HASH_FUNCS +) + +// IfreqSlave is a struct for ioctl bond manipulation syscalls. +// It is used to assign slave to bond interface with Name. +type IfreqSlave struct { + Name [unix.IFNAMSIZ]byte + Slave [unix.IFNAMSIZ]byte +} + +// Ifreq is a struct for ioctl ethernet manipulation syscalls. +type Ifreq struct { + Name [unix.IFNAMSIZ]byte + Data uintptr +} + +// ethtoolSset is a string set information +type ethtoolSset struct { + cmd uint32 + reserved uint32 + mask uint64 + data [1]uint32 +} + +// ethtoolGstrings is string set for data tagging +type ethtoolGstrings struct { + cmd uint32 + stringSet uint32 + length uint32 + data [32]byte +} + +type ethtoolStats struct { + cmd uint32 + nStats uint32 + data [1]uint64 +} + +// newIocltSlaveReq returns filled IfreqSlave with proper interface names +// It is used by ioctl to assign slave to bond master +func newIocltSlaveReq(slave, master string) *IfreqSlave { + ifreq := &IfreqSlave{} + copy(ifreq.Name[:unix.IFNAMSIZ-1], master) + copy(ifreq.Slave[:unix.IFNAMSIZ-1], slave) + return ifreq +} + +// newIocltStringSetReq creates request to get interface string set +func newIocltStringSetReq(linkName string) (*Ifreq, *ethtoolSset) { + e := ðtoolSset{ + cmd: ETHTOOL_GSSET_INFO, + mask: 1 << ETH_SS_STATS, + } + + ifreq := &Ifreq{Data: uintptr(unsafe.Pointer(e))} + copy(ifreq.Name[:unix.IFNAMSIZ-1], linkName) + return ifreq, e +} + +// getSocketUDP returns file descriptor to new UDP socket +// It is used for communication with ioctl interface. +func getSocketUDP() (int, error) { + return syscall.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0) +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go index 5aa3a1790ab..fe74ffab964 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go @@ -3,6 +3,7 @@ package netlink import ( "fmt" "net" + "os" ) // Link represents a link device from netlink. 
Shared link attributes @@ -38,6 +39,8 @@ type LinkAttrs struct { Protinfo *Protinfo OperState LinkOperState NetNsID int + NumTxQueues int + NumRxQueues int } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -259,6 +262,9 @@ const ( type Macvlan struct { LinkAttrs Mode MacvlanMode + + // MACAddrs is only populated for Macvlan SOURCE links + MACAddrs []net.HardwareAddr } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -284,8 +290,10 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag + Mode TuntapMode + Flags TuntapFlag + Queues int + Fds []*os.File } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -327,26 +335,28 @@ func (generic *GenericLink) Type() string { type Vxlan struct { LinkAttrs - VxlanId int - VtepDevIndex int - SrcAddr net.IP - Group net.IP - TTL int - TOS int - Learning bool - Proxy bool - RSC bool - L2miss bool - L3miss bool - UDPCSum bool - NoAge bool - GBP bool - FlowBased bool - Age int - Limit int - Port int - PortLow int - PortHigh int + VxlanId int + VtepDevIndex int + SrcAddr net.IP + Group net.IP + TTL int + TOS int + Learning bool + Proxy bool + RSC bool + L2miss bool + L3miss bool + UDPCSum bool + UDP6ZeroCSumTx bool + UDP6ZeroCSumRx bool + NoAge bool + GBP bool + FlowBased bool + Age int + Limit int + Port int + PortLow int + PortHigh int } func (vxlan *Vxlan) Attrs() *LinkAttrs { @@ -695,17 +705,25 @@ func (gretap *Gretap) Attrs() *LinkAttrs { } func (gretap *Gretap) Type() string { + if gretap.Local.To4() == nil { + return "ip6gretap" + } return "gretap" } type Iptun struct { LinkAttrs - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - Link uint32 - Local net.IP - Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + Link uint32 + Local net.IP + Remote net.IP + EncapSport uint16 + EncapDport uint16 + EncapType uint16 + EncapFlags uint16 + FlowBased bool } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -716,6 +734,28 @@ func (iptun *Iptun) Type() string { return "ipip" } +type Sittun struct { + LinkAttrs + Link uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 +} + +func (sittun *Sittun) Attrs() *LinkAttrs { + return &sittun.LinkAttrs +} + +func (sittun *Sittun) Type() string { + return "sit" +} + type Vti struct { LinkAttrs IKey uint32 @@ -735,16 +775,20 @@ func (iptun *Vti) Type() string { type Gretun struct { LinkAttrs - Link uint32 - IFlags uint16 - OFlags uint16 - IKey uint32 - OKey uint32 - Local net.IP - Remote net.IP - Ttl uint8 - Tos uint8 - PMtuDisc uint8 + Link uint32 + IFlags uint16 + OFlags uint16 + IKey uint32 + OKey uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 } func (gretun *Gretun) Attrs() *LinkAttrs { @@ -752,6 +796,9 @@ func (gretun *Gretun) Attrs() *LinkAttrs { } func (gretun *Gretun) Type() string { + if gretun.Local.To4() == nil { + return "ip6gre" + } return "gre" } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go index e94fd9766cd..540191ed843 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go @@ -11,22 +11,24 @@ import ( "github.com/vishvananda/netlink/nl" 
"github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) const ( SizeofLinkStats32 = 0x5c SizeofLinkStats64 = 0xd8 - IFLA_STATS64 = 0x17 // syscall pkg does not contain this one ) const ( - TUNTAP_MODE_TUN TuntapMode = syscall.IFF_TUN - TUNTAP_MODE_TAP TuntapMode = syscall.IFF_TAP - TUNTAP_DEFAULTS TuntapFlag = syscall.IFF_TUN_EXCL | syscall.IFF_ONE_QUEUE - TUNTAP_VNET_HDR TuntapFlag = syscall.IFF_VNET_HDR - TUNTAP_TUN_EXCL TuntapFlag = syscall.IFF_TUN_EXCL - TUNTAP_NO_PI TuntapFlag = syscall.IFF_NO_PI - TUNTAP_ONE_QUEUE TuntapFlag = syscall.IFF_ONE_QUEUE + TUNTAP_MODE_TUN TuntapMode = unix.IFF_TUN + TUNTAP_MODE_TAP TuntapMode = unix.IFF_TAP + TUNTAP_DEFAULTS TuntapFlag = unix.IFF_TUN_EXCL | unix.IFF_ONE_QUEUE + TUNTAP_VNET_HDR TuntapFlag = unix.IFF_VNET_HDR + TUNTAP_TUN_EXCL TuntapFlag = unix.IFF_TUN_EXCL + TUNTAP_NO_PI TuntapFlag = unix.IFF_NO_PI + TUNTAP_ONE_QUEUE TuntapFlag = unix.IFF_ONE_QUEUE + TUNTAP_MULTI_QUEUE TuntapFlag = unix.IFF_MULTI_QUEUE + TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) var lookupByDump = false @@ -61,15 +63,15 @@ func (h *Handle) ensureIndex(link *LinkAttrs) { func (h *Handle) LinkSetARPOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change |= syscall.IFF_NOARP - msg.Flags |= syscall.IFF_NOARP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags |= unix.IFF_NOARP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -80,15 +82,15 @@ func LinkSetARPOff(link Link) error { func (h *Handle) LinkSetARPOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change |= syscall.IFF_NOARP - msg.Flags &= ^uint32(syscall.IFF_NOARP) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags &= ^uint32(unix.IFF_NOARP) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -99,15 +101,84 @@ func LinkSetARPOn(link Link) error { func (h *Handle) SetPromiscOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_PROMISC - msg.Flags = syscall.IFF_PROMISC + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Flags = unix.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrAdd(link, addr) +} + +func (h *Handle) MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { + return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_ADD) +} + +func MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrDel(link, addr) +} + +func (h *Handle) MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { + 
return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_DEL) +} + +func MacvlanMACAddrFlush(link Link) error { + return pkgHandle.MacvlanMACAddrFlush(link) +} + +func (h *Handle) MacvlanMACAddrFlush(link Link) error { + return h.macvlanMACAddrChange(link, nil, nl.MACVLAN_MACADDR_FLUSH) +} + +func MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrSet(link, addrs) +} + +func (h *Handle) MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { + return h.macvlanMACAddrChange(link, addrs, nl.MACVLAN_MACADDR_SET) +} + +func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode uint32) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + // IFLA_MACVLAN_MACADDR_MODE = mode + b := make([]byte, 4) + native.PutUint32(b, mode) + nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b) + + // populate message with MAC addrs, if necessary + switch mode { + case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL: + if len(addrs) == 1 { + nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) + } + case nl.MACVLAN_MACADDR_SET: + mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil) + for _, addr := range addrs { + nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr)) + } + } + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -118,7 +189,7 @@ func BridgeSetMcastSnoop(link Link, on bool) error { func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { bridge := link.(*Bridge) bridge.MulticastSnooping = &on - return h.linkModify(bridge, syscall.NLM_F_ACK) + return h.linkModify(bridge, unix.NLM_F_ACK) } func SetPromiscOn(link Link) error { @@ -128,15 +199,15 @@ func SetPromiscOn(link Link) error { func (h *Handle) SetPromiscOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_PROMISC - msg.Flags = 0 & ^syscall.IFF_PROMISC + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Flags = 0 & ^unix.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -155,15 +226,15 @@ func LinkSetUp(link Link) error { func (h *Handle) LinkSetUp(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = syscall.IFF_UP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -178,15 +249,15 @@ func LinkSetDown(link Link) error { func (h *Handle) LinkSetDown(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := 
h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = 0 & ^syscall.IFF_UP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Flags = 0 & ^unix.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -201,19 +272,19 @@ func LinkSetMTU(link Link, mtu int) error { func (h *Handle) LinkSetMTU(link Link, mtu int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(mtu)) - data := nl.NewRtAttr(syscall.IFLA_MTU, b) + data := nl.NewRtAttr(unix.IFLA_MTU, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -228,16 +299,16 @@ func LinkSetName(link Link, name string) error { func (h *Handle) LinkSetName(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name)) + data := nl.NewRtAttr(unix.IFLA_IFNAME, []byte(name)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -252,16 +323,16 @@ func LinkSetAlias(link Link, name string) error { func (h *Handle) LinkSetAlias(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_IFALIAS, []byte(name)) + data := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(name)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -276,16 +347,16 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr)) + data := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(hwaddr)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -300,13 +371,13 @@ func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, 
syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) vfmsg := nl.VfMac{ Vf: uint32(vf), @@ -315,7 +386,7 @@ func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -330,13 +401,13 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) vfmsg := nl.VfVlan{ Vf: uint32(vf), @@ -345,7 +416,7 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -360,13 +431,13 @@ func LinkSetVfTxRate(link Link, vf, rate int) error { func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) vfmsg := nl.VfTxRate{ Vf: uint32(vf), @@ -375,7 +446,7 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -391,13 +462,13 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) if check { setting = 1 @@ -409,7 +480,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -425,13 +496,13 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req 
:= h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) if state { setting = 1 @@ -443,7 +514,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -491,19 +562,19 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error { func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(masterIndex)) - data := nl.NewRtAttr(syscall.IFLA_MASTER, b) + data := nl.NewRtAttr(unix.IFLA_MASTER, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -520,19 +591,19 @@ func LinkSetNsPid(link Link, nspid int) error { func (h *Handle) LinkSetNsPid(link Link, nspid int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(nspid)) - data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b) + data := nl.NewRtAttr(unix.IFLA_NET_NS_PID, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -549,19 +620,19 @@ func LinkSetNsFd(link Link, fd int) error { func (h *Handle) LinkSetNsFd(link Link, fd int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(fd)) - data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b) + data := nl.NewRtAttr(unix.IFLA_NET_NS_FD, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -576,15 +647,15 @@ func LinkSetXdpFd(link Link, fd int) error { func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { base := link.Attrs() ensureIndex(base) - req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := nl.NewNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -642,6 +713,8 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { 
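A minimal usage sketch for the MacvlanMACAddr* helpers added earlier in this file; it is not part of the vendored patch. The link name "macvlan0" and the MAC address are assumptions, and the calls only succeed on a macvlan device in source mode.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	// Assumed: a macvlan link named "macvlan0" already exists in source mode.
	link, err := netlink.LinkByName("macvlan0")
	if err != nil {
		log.Fatal(err)
	}
	mac, err := net.ParseMAC("00:11:22:33:44:55") // example address
	if err != nil {
		log.Fatal(err)
	}
	// Sends RTM_NEWLINK with IFLA_MACVLAN_MACADDR_MODE = MACVLAN_MACADDR_ADD,
	// the path implemented by macvlanMACAddrChange above.
	if err := netlink.MacvlanMACAddrAdd(link, mac); err != nil {
		log.Fatal(err)
	}
	// Drop all allowed source addresses again.
	if err := netlink.MacvlanMACAddrFlush(link); err != nil {
		log.Fatal(err)
	}
}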
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) if vxlan.UDPCSum { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) @@ -766,6 +839,12 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { } } +func cleanupFds(fds []*os.File) { + for _, f := range fds { + f.Close() + } +} + // LinkAdd adds a new link device. The type and features of the device // are taken from the parameters in the link object. // Equivalent to: `ip link add $link` @@ -777,7 +856,7 @@ func LinkAdd(link Link) error { // are taken fromt the parameters in the link object. // Equivalent to: `ip link add $link` func (h *Handle) LinkAdd(link Link) error { - return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) } func (h *Handle) linkModify(link Link, flags int) error { @@ -791,104 +870,155 @@ func (h *Handle) linkModify(link Link, flags int) error { if tuntap, ok := link.(*Tuntap); ok { // TODO: support user // TODO: support group - // TODO: multi_queue // TODO: support non- persistent - if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { + if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) } - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() + + queues := tuntap.Queues + + var fds []*os.File var req ifReq - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_DEFAULTS) + copy(req.Name[:15], base.Name) + + req.Flags = uint16(tuntap.Flags) + + if queues == 0 { //Legacy compatibility + queues = 1 + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) + } } else { - req.Flags = uint16(tuntap.Flags) + // For best peformance set Flags to TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR + // when a) KVM has support for this ABI and + // b) the value of the flag is queryable using the TUNGETIFF ioctl + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_MULTI_QUEUE_DEFAULTS) + } } + req.Flags |= uint16(tuntap.Mode) - copy(req.Name[:15], base.Name) - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) - if errno != 0 { - return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) + + for i := 0; i < queues; i++ { + localReq := req + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil { + cleanupFds(fds) + return err + } + + fds = append(fds, file) + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) + } } - _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) if errno != 0 { + cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) } + h.ensureIndex(base) // can't set master during create, so set it afterwards if base.MasterIndex != 0 { // TODO: verify MasterIndex is actually a bridge? 
- return h.LinkSetMasterByIndex(link, base.MasterIndex) + err := h.LinkSetMasterByIndex(link, base.MasterIndex) + if err != nil { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + cleanupFds(fds) + return err + } + } + + if tuntap.Queues == 0 { + cleanupFds(fds) + } else { + tuntap.Fds = fds } + return nil } - req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, flags) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) // TODO: make it shorter if base.Flags&net.FlagUp != 0 { - msg.Change = syscall.IFF_UP - msg.Flags = syscall.IFF_UP + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP } if base.Flags&net.FlagBroadcast != 0 { - msg.Change |= syscall.IFF_BROADCAST - msg.Flags |= syscall.IFF_BROADCAST + msg.Change |= unix.IFF_BROADCAST + msg.Flags |= unix.IFF_BROADCAST } if base.Flags&net.FlagLoopback != 0 { - msg.Change |= syscall.IFF_LOOPBACK - msg.Flags |= syscall.IFF_LOOPBACK + msg.Change |= unix.IFF_LOOPBACK + msg.Flags |= unix.IFF_LOOPBACK } if base.Flags&net.FlagPointToPoint != 0 { - msg.Change |= syscall.IFF_POINTOPOINT - msg.Flags |= syscall.IFF_POINTOPOINT + msg.Change |= unix.IFF_POINTOPOINT + msg.Flags |= unix.IFF_POINTOPOINT } if base.Flags&net.FlagMulticast != 0 { - msg.Change |= syscall.IFF_MULTICAST - msg.Flags |= syscall.IFF_MULTICAST + msg.Change |= unix.IFF_MULTICAST + msg.Flags |= unix.IFF_MULTICAST } + if base.Index != 0 { + msg.Index = int32(base.Index) + } + req.AddData(msg) if base.ParentIndex != 0 { b := make([]byte, 4) native.PutUint32(b, uint32(base.ParentIndex)) - data := nl.NewRtAttr(syscall.IFLA_LINK, b) + data := nl.NewRtAttr(unix.IFLA_LINK, b) req.AddData(data) } else if link.Type() == "ipvlan" { return fmt.Errorf("Can't create ipvlan link without ParentIndex") } - nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) req.AddData(nameData) if base.MTU > 0 { - mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) } if base.TxQLen >= 0 { - qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + qlen := nl.NewRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) req.AddData(qlen) } if base.HardwareAddr != nil { - hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr)) + hwaddr := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(base.HardwareAddr)) req.AddData(hwaddr) } + if base.NumTxQueues > 0 { + txqueues := nl.NewRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + req.AddData(txqueues) + } + + if base.NumRxQueues > 0 { + rxqueues := nl.NewRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + req.AddData(rxqueues) + } + if base.Namespace != nil { var attr *nl.RtAttr switch base.Namespace.(type) { case NsPid: val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) - attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) + attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) case NsFd: val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) - attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) + attr = nl.NewRtAttr(unix.IFLA_NET_NS_FD, val) } req.AddData(attr) @@ -898,7 +1028,7 @@ func (h *Handle) linkModify(link Link, flags int) error { addXdpAttrs(base.Xdp, req) } - linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) 
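A hedged sketch of the new multi-queue TUN/TAP path handled in the tuntap branch above; it is not taken from the patch. The device name "tap0" and the queue count are illustrative.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	tap := &netlink.Tuntap{
		LinkAttrs: netlink.LinkAttrs{Name: "tap0"},     // illustrative name
		Mode:      netlink.TUNTAP_MODE_TAP,
		Flags:     netlink.TUNTAP_MULTI_QUEUE_DEFAULTS, // flag introduced above
		Queues:    4,                                   // open four /dev/net/tun queue fds
	}
	if err := netlink.LinkAdd(tap); err != nil {
		log.Fatal(err)
	}
	// With Queues > 0 the per-queue descriptors are kept open and returned in
	// tap.Fds; the caller owns them and closes them when finished.
	defer func() {
		for _, f := range tap.Fds {
			f.Close()
		}
	}()
	// ... exchange packets on tap.Fds ...
}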
nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) switch link := link.(type) { @@ -910,13 +1040,13 @@ func (h *Handle) linkModify(link Link, flags int) error { case *Veth: data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) - nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) - nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) + nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) if base.TxQLen >= 0 { - nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } if base.MTU > 0 { - nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } case *Vxlan: @@ -940,6 +1070,8 @@ func (h *Handle) linkModify(link Link, flags int) error { addGretapAttrs(link, linkInfo) case *Iptun: addIptunAttrs(link, linkInfo) + case *Sittun: + addSittunAttrs(link, linkInfo) case *Gretun: addGretunAttrs(link, linkInfo) case *Vti: @@ -954,7 +1086,7 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(linkInfo) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } @@ -984,13 +1116,13 @@ func (h *Handle) LinkDel(link Link) error { h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -1033,16 +1165,16 @@ func (h *Handle) LinkByName(name string) (Link, error) { return h.linkByNameDump(name) } - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) req.AddData(nameData) link, err := execGetLink(req) - if err == syscall.EINVAL { + if err == unix.EINVAL { // older kernels don't support looking up via IFLA_IFNAME // so fall back to dumping all links h.lookupByDump = true @@ -1065,16 +1197,16 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) { return h.linkByAliasDump(alias) } - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) req.AddData(nameData) link, err := execGetLink(req) - if err == syscall.EINVAL { + if err == unix.EINVAL { // older kernels don't support looking up via IFLA_IFALIAS // so fall back to dumping all links h.lookupByDump = true @@ -1091,9 +1223,9 @@ func LinkByIndex(index int) (Link, error) { // LinkByIndex finds a link by index and returns a pointer to the object. 
func (h *Handle) LinkByIndex(index int) (Link, error) { - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(index) req.AddData(msg) @@ -1101,10 +1233,10 @@ func (h *Handle) LinkByIndex(index int) (Link, error) { } func execGetLink(req *nl.NetlinkRequest) (Link, error) { - msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { if errno, ok := err.(syscall.Errno); ok { - if errno == syscall.ENODEV { + if errno == unix.ENODEV { return nil, LinkNotFoundError{fmt.Errorf("Link not found")} } } @@ -1125,7 +1257,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { // linkDeserialize deserializes a raw message received from netlink into // a link object. -func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { +func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) @@ -1134,7 +1266,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { } base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} - if msg.Flags&syscall.IFF_PROMISC != 0 { + if msg.Flags&unix.IFF_PROMISC != 0 { base.Promisc = 1 } var ( @@ -1145,7 +1277,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { ) for _, attr := range attrs { switch attr.Attr.Type { - case syscall.IFLA_LINKINFO: + case unix.IFLA_LINKINFO: infos, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err @@ -1177,10 +1309,16 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { link = &Macvtap{} case "gretap": link = &Gretap{} + case "ip6gretap": + link = &Gretap{} case "ipip": link = &Iptun{} + case "sit": + link = &Sittun{} case "gre": link = &Gretun{} + case "ip6gre": + link = &Gretun{} case "vti": link = &Vti{} case "vrf": @@ -1210,10 +1348,16 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { parseMacvtapData(link, data) case "gretap": parseGretapData(link, data) + case "ip6gretap": + parseGretapData(link, data) case "ipip": parseIptunData(link, data) + case "sit": + parseSittunData(link, data) case "gre": parseGretunData(link, data) + case "ip6gre": + parseGretunData(link, data) case "vti": parseVtiData(link, data) case "vrf": @@ -1225,7 +1369,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { } } } - case syscall.IFLA_ADDRESS: + case unix.IFLA_ADDRESS: var nonzero bool for _, b := range attr.Value { if b != 0 { @@ -1235,40 +1379,40 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { if nonzero { base.HardwareAddr = attr.Value[:] } - case syscall.IFLA_IFNAME: + case unix.IFLA_IFNAME: base.Name = string(attr.Value[:len(attr.Value)-1]) - case syscall.IFLA_MTU: + case unix.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_LINK: + case unix.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_MASTER: + case unix.IFLA_MASTER: base.MasterIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_TXQLEN: + case unix.IFLA_TXQLEN: base.TxQLen = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_IFALIAS: + case unix.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) - case syscall.IFLA_STATS: + case unix.IFLA_STATS: stats32 = 
attr.Value[:] - case IFLA_STATS64: + case unix.IFLA_STATS64: stats64 = attr.Value[:] - case nl.IFLA_XDP: + case unix.IFLA_XDP: xdp, err := parseLinkXdp(attr.Value[:]) if err != nil { return nil, err } base.Xdp = xdp - case syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED: - if hdr != nil && hdr.Type == syscall.RTM_NEWLINK && - msg.Family == syscall.AF_BRIDGE { + case unix.IFLA_PROTINFO | unix.NLA_F_NESTED: + if hdr != nil && hdr.Type == unix.RTM_NEWLINK && + msg.Family == unix.AF_BRIDGE { attrs, err := nl.ParseRouteAttr(attr.Value[:]) if err != nil { return nil, err } base.Protinfo = parseProtinfo(attrs) } - case syscall.IFLA_OPERSTATE: + case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) - case nl.IFLA_LINK_NETNSID: + case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) } } @@ -1299,12 +1443,12 @@ func LinkList() ([]Link, error) { func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) if err != nil { return nil, err } @@ -1324,20 +1468,20 @@ func (h *Handle) LinkList() ([]Link, error) { // LinkUpdate is used to pass information back from LinkSubscribe() type LinkUpdate struct { nl.IfInfomsg - Header syscall.NlMsghdr + Header unix.NlMsghdr Link } // LinkSubscribe takes a chan down which notifications will be sent // when links change. Close the 'done' chan to stop subscription. func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil) + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) } // LinkSubscribeAt works like LinkSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
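A small sketch of link event subscription using the ListExisting option introduced in the hunks below; not part of the vendored patch. It simply prints the name carried by each LinkUpdate.

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	updates := make(chan netlink.LinkUpdate)
	done := make(chan struct{})
	defer close(done)

	// ListExisting asks the subscription to send an RTM_GETLINK dump first, so
	// the current links arrive on the channel before any live notifications.
	opts := netlink.LinkSubscribeOptions{ListExisting: true}
	if err := netlink.LinkSubscribeWithOptions(updates, done, opts); err != nil {
		fmt.Println("subscribe failed:", err)
		return
	}
	for update := range updates {
		fmt.Println("link update:", update.Attrs().Name)
	}
}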
func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(ns, netns.None(), ch, done, nil) + return linkSubscribeAt(ns, netns.None(), ch, done, nil, false) } // LinkSubscribeOptions contains a set of options to use with @@ -1345,6 +1489,7 @@ func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct type LinkSubscribeOptions struct { Namespace *netns.NsHandle ErrorCallback func(error) + ListExisting bool } // LinkSubscribeWithOptions work like LinkSubscribe but enable to @@ -1355,11 +1500,11 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) } -func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) if err != nil { return err } @@ -1369,6 +1514,15 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c s.Close() }() } + if listExisting { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETLINK, + unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(msg) + if err := s.Send(req); err != nil { + return err + } + } go func() { defer close(ch) for { @@ -1380,15 +1534,30 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c return } for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + native := nl.NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(syscall.Errno(-error)) + } + return + } ifmsg := nl.DeserializeIfInfomsg(m.Data) - link, err := LinkDeserialize(&m.Header, m.Data) + header := unix.NlMsghdr(m.Header) + link, err := LinkDeserialize(&header, m.Data) if err != nil { if cberr != nil { cberr(err) } return } - ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link} + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} } } }() @@ -1463,16 +1632,16 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) - br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) + br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) nl.NewRtAttrChild(br, attr, boolToByte(mode)) req.AddData(br) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } @@ -1490,19 +1659,19 @@ func LinkSetTxQLen(link Link, qlen int) error { func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, 
syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(qlen)) - data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b) + data := nl.NewRtAttr(unix.IFLA_TXQLEN, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -1548,6 +1717,10 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan.L3miss = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_UDP_CSUM: vxlan.UDPCSum = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX: + vxlan.UDP6ZeroCSumTx = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX: + vxlan.UDP6ZeroCSumRx = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_GBP: vxlan.GBP = true case nl.IFLA_VXLAN_FLOWBASED: @@ -1650,7 +1823,8 @@ func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { - if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { + switch datum.Attr.Type { + case nl.IFLA_MACVLAN_MODE: switch native.Uint32(datum.Value[0:4]) { case nl.MACVLAN_MODE_PRIVATE: macv.Mode = MACVLAN_MODE_PRIVATE @@ -1663,7 +1837,16 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { case nl.MACVLAN_MODE_SOURCE: macv.Mode = MACVLAN_MODE_SOURCE } - return + case nl.IFLA_MACVLAN_MACADDR_COUNT: + macv.MACAddrs = make([]net.HardwareAddr, 0, int(native.Uint32(datum.Value[0:4]))) + case nl.IFLA_MACVLAN_MACADDR_DATA: + macs, err := nl.ParseRouteAttr(datum.Value[:]) + if err != nil { + panic(fmt.Sprintf("failed to ParseRouteAttr for IFLA_MACVLAN_MACADDR_DATA: %v", err)) + } + for _, macDatum := range macs { + macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) + } } } } @@ -1671,19 +1854,19 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { // copied from pkg/net_linux.go func linkFlags(rawFlags uint32) net.Flags { var f net.Flags - if rawFlags&syscall.IFF_UP != 0 { + if rawFlags&unix.IFF_UP != 0 { f |= net.FlagUp } - if rawFlags&syscall.IFF_BROADCAST != 0 { + if rawFlags&unix.IFF_BROADCAST != 0 { f |= net.FlagBroadcast } - if rawFlags&syscall.IFF_LOOPBACK != 0 { + if rawFlags&unix.IFF_LOOPBACK != 0 { f |= net.FlagLoopback } - if rawFlags&syscall.IFF_POINTOPOINT != 0 { + if rawFlags&unix.IFF_POINTOPOINT != 0 { f |= net.FlagPointToPoint } - if rawFlags&syscall.IFF_MULTICAST != 0 { + if rawFlags&unix.IFF_MULTICAST != 0 { f |= net.FlagMulticast } return f @@ -1698,12 +1881,17 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { return } - ip := gretap.Local.To4() - if ip != nil { + if ip := gretap.Local; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) } - ip = gretap.Remote.To4() - if ip != nil { + + if ip := gretap.Remote; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip)) } @@ -1742,9 +1930,9 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_IKEY: gre.OKey = ntohl(datum.Value[0:4]) case nl.IFLA_GRE_LOCAL: - gre.Local = net.IP(datum.Value[0:4]) + gre.Local = net.IP(datum.Value[0:16]) case nl.IFLA_GRE_REMOTE: - gre.Remote = net.IP(datum.Value[0:4]) + gre.Remote = net.IP(datum.Value[0:16]) case nl.IFLA_GRE_ENCAP_SPORT: gre.EncapSport = 
ntohs(datum.Value[0:2]) case nl.IFLA_GRE_ENCAP_DPORT: @@ -1765,7 +1953,9 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - gre.FlowBased = int8(datum.Value[0]) != 0 + if len(datum.Value) > 0 { + gre.FlowBased = int8(datum.Value[0]) != 0 + } } } } @@ -1773,12 +1963,17 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - ip := gre.Local.To4() - if ip != nil { + if ip := gre.Local; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) } - ip = gre.Remote.To4() - if ip != nil { + + if ip := gre.Remote; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip)) } @@ -1802,6 +1997,10 @@ func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) } func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1813,9 +2012,9 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_IKEY: gre.OKey = ntohl(datum.Value[0:4]) case nl.IFLA_GRE_LOCAL: - gre.Local = net.IP(datum.Value[0:4]) + gre.Local = net.IP(datum.Value[0:16]) case nl.IFLA_GRE_REMOTE: - gre.Remote = net.IP(datum.Value[0:4]) + gre.Remote = net.IP(datum.Value[0:16]) case nl.IFLA_GRE_IFLAGS: gre.IFlags = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_OFLAGS: @@ -1827,6 +2026,14 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.Tos = uint8(datum.Value[0]) case nl.IFLA_GRE_PMTUDISC: gre.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_GRE_ENCAP_TYPE: + gre.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_FLAGS: + gre.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_SPORT: + gre.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_DPORT: + gre.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -1840,11 +2047,12 @@ func parseLinkStats64(data []byte) *LinkStatistics { } func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { - attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil) + attrs := nl.NewRtAttr(unix.IFLA_XDP|unix.NLA_F_NESTED, nil) b := make([]byte, 4) native.PutUint32(b, uint32(xdp.Fd)) nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b) if xdp.Flags != 0 { + b := make([]byte, 4) native.PutUint32(b, xdp.Flags) nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b) } @@ -1873,6 +2081,12 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { + if iptun.FlowBased { + // In flow based mode, no other attributes need to be configured + nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) + return + } + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) ip := iptun.Local.To4() @@ -1891,6 +2105,10 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, 
nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1907,6 +2125,72 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.Tos = uint8(datum.Value[0]) case nl.IFLA_IPTUN_PMTUDISC: iptun.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + iptun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + iptun.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_TYPE: + iptun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + iptun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + iptun.FlowBased = int8(datum.Value[0]) != 0 + } + } +} + +func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + if sittun.Link != 0 { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) + } + + ip := sittun.Local.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = sittun.Remote.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + if sittun.Ttl > 0 { + // Would otherwise fail on 3.10 kernel + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) + } + + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) +} + +func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { + sittun := link.(*Sittun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + sittun.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_REMOTE: + sittun.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_TTL: + sittun.Ttl = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_TOS: + sittun.Tos = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_PMTUDISC: + sittun.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_TYPE: + sittun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + sittun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + sittun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + sittun.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -2014,3 +2298,57 @@ func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) { } } } + +// LinkSetBondSlave add slave to bond link via ioctl interface. 
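A hedged example of creating a SIT tunnel through the new addSittunAttrs path; it is not part of the vendored patch, and the device name and TEST-NET endpoint addresses are placeholders.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	sit := &netlink.Sittun{
		LinkAttrs: netlink.LinkAttrs{Name: "sit1"}, // placeholder name
		Local:     net.ParseIP("192.0.2.1"),        // placeholder endpoints
		Remote:    net.ParseIP("192.0.2.2"),
		Ttl:       64,
	}
	// LinkAdd serializes these fields via addSittunAttrs above
	// (IFLA_IPTUN_LOCAL, IFLA_IPTUN_REMOTE, IFLA_IPTUN_TTL, ...).
	if err := netlink.LinkAdd(sit); err != nil {
		log.Fatal(err)
	}
	if err := netlink.LinkSetUp(sit); err != nil {
		log.Fatal(err)
	}
}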
+func LinkSetBondSlave(link Link, master *Bond) error { + fd, err := getSocketUDP() + if err != nil { + return err + } + defer syscall.Close(fd) + + ifreq := newIocltSlaveReq(link.Attrs().Name, master.Attrs().Name) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), unix.SIOCBONDENSLAVE, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return fmt.Errorf("Failed to enslave %q to %q, errno=%v", link.Attrs().Name, master.Attrs().Name, errno) + } + return nil +} + +// VethPeerIndex get veth peer index. +func VethPeerIndex(link *Veth) (int, error) { + fd, err := getSocketUDP() + if err != nil { + return -1, err + } + defer syscall.Close(fd) + + ifreq, sSet := newIocltStringSetReq(link.Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + } + + gstrings := ðtoolGstrings{ + cmd: ETHTOOL_GSTRINGS, + stringSet: ETH_SS_STATS, + length: sSet.data[0], + } + ifreq.Data = uintptr(unsafe.Pointer(gstrings)) + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + } + + stats := ðtoolStats{ + cmd: ETHTOOL_GSTATS, + nStats: gstrings.length, + } + ifreq.Data = uintptr(unsafe.Pointer(stats)) + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + } + return int(stats.data[0]), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go index 6a6f71ce866..3f5cd497a73 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go @@ -15,6 +15,8 @@ type Neigh struct { IP net.IP HardwareAddr net.HardwareAddr LLIPAddr net.IP //Used in the case of NHRP + Vlan int + VNI int } // String returns $ip/$hwaddr $label diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go index 5edc8b41259..f75c22649f9 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -2,10 +2,10 @@ package netlink import ( "net" - "syscall" "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) const ( @@ -73,7 +73,7 @@ func NeighAdd(neigh *Neigh) error { // NeighAdd will add an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh add ....` func (h *Handle) NeighAdd(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_EXCL) } // NeighSet will add or replace an IP to MAC mapping to the ARP table @@ -85,7 +85,7 @@ func NeighSet(neigh *Neigh) error { // NeighSet will add or replace an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh replace....` func (h *Handle) NeighSet(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_REPLACE) } // NeighAppend will append an entry to FDB @@ -97,7 +97,7 @@ func 
NeighAppend(neigh *Neigh) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) NeighAppend(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_APPEND) } // NeighAppend will append an entry to FDB @@ -109,7 +109,7 @@ func neighAdd(neigh *Neigh, mode int) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) neighAdd(neigh *Neigh, mode int) error { - req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWNEIGH, mode|unix.NLM_F_ACK) return neighHandle(neigh, req) } @@ -122,7 +122,7 @@ func NeighDel(neigh *Neigh) error { // NeighDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` func (h *Handle) NeighDel(neigh *Neigh) error { - req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELNEIGH, unix.NLM_F_ACK) return neighHandle(neigh, req) } @@ -160,7 +160,17 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { req.AddData(hwData) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if neigh.Vlan != 0 { + vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) + req.AddData(vlanData) + } + + if neigh.VNI != 0 { + vniData := nl.NewRtAttr(NDA_VNI, nl.Uint32Attr(uint32(neigh.VNI))) + req.AddData(vniData) + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -193,7 +203,7 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { } func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { - req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) msg := Ndmsg{ Family: uint8(family), Index: uint32(linkIndex), @@ -201,7 +211,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { } req.AddData(&msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) if err != nil { return nil, err } @@ -257,7 +267,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) { // BUG: Is this a bug in the netlink library? 
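A hedged sketch of an FDB entry that exercises the new Vlan and VNI fields serialized as NDA_VLAN/NDA_VNI in neighHandle above; not part of the vendored patch. The vxlan device name, MAC, remote IP, VLAN and VNI values are all placeholders.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	vxlan, err := netlink.LinkByName("vxlan0") // assumed existing vxlan device
	if err != nil {
		log.Fatal(err)
	}
	mac, err := net.ParseMAC("00:11:22:33:44:55") // placeholder MAC
	if err != nil {
		log.Fatal(err)
	}
	fdb := &netlink.Neigh{
		LinkIndex:    vxlan.Attrs().Index,
		Family:       unix.AF_BRIDGE,
		State:        netlink.NUD_PERMANENT,
		Flags:        netlink.NTF_SELF,
		HardwareAddr: mac,
		IP:           net.ParseIP("198.51.100.10"), // placeholder remote VTEP
		Vlan:         100, // sent as NDA_VLAN by the code above
		VNI:          42,  // sent as NDA_VNI by the code above
	}
	// Roughly: bridge fdb append 00:11:22:33:44:55 dev vxlan0 dst 198.51.100.10 self
	if err := netlink.NeighAppend(fdb); err != nil {
		log.Fatal(err)
	}
}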
// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) - attrLen := attr.Attr.Len - syscall.SizeofRtAttr + attrLen := attr.Attr.Len - unix.SizeofRtAttr if attrLen == 4 && (encapType == "ipip" || encapType == "sit" || encapType == "gre") { @@ -268,6 +278,10 @@ func NeighDeserialize(m []byte) (*Neigh, error) { } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } + case NDA_VLAN: + neigh.Vlan = int(native.Uint16(attr.Value[0:2])) + case NDA_VNI: + neigh.VNI = int(native.Uint32(attr.Value[0:4])) } } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go index fe362e9fa7c..50db3b4cdd8 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -1,17 +1,18 @@ package nl import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) type IfAddrmsg struct { - syscall.IfAddrmsg + unix.IfAddrmsg } func NewIfAddrmsg(family int) *IfAddrmsg { return &IfAddrmsg{ - IfAddrmsg: syscall.IfAddrmsg{ + IfAddrmsg: unix.IfAddrmsg{ Family: uint8(family), }, } @@ -35,15 +36,15 @@ func NewIfAddrmsg(family int) *IfAddrmsg { // SizeofIfAddrmsg = 0x8 func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { - return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0])) + return (*IfAddrmsg)(unsafe.Pointer(&b[0:unix.SizeofIfAddrmsg][0])) } func (msg *IfAddrmsg) Serialize() []byte { - return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfAddrmsg) Len() int { - return syscall.SizeofIfAddrmsg + return unix.SizeofIfAddrmsg } // struct ifa_cacheinfo { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 9ae65a12c2a..84a3498dd3d 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,35 +1,11 @@ package nl import ( - "syscall" "unsafe" ) const ( DEFAULT_CHANGE = 0xFFFFFFFF - // doesn't exist in syscall - IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota - IFLA_STATS64 - IFLA_VF_PORTS - IFLA_PORT_SELF - IFLA_AF_SPEC - IFLA_GROUP - IFLA_NET_NS_FD - IFLA_EXT_MASK - IFLA_PROMISCUITY - IFLA_NUM_TX_QUEUES - IFLA_NUM_RX_QUEUES - IFLA_CARRIER - IFLA_PHYS_PORT_ID - IFLA_CARRIER_CHANGES - IFLA_PHYS_SWITCH_ID - IFLA_LINK_NETNSID - IFLA_PHYS_PORT_NAME - IFLA_PROTO_DOWN - IFLA_GSO_MAX_SEGS - IFLA_GSO_MAX_SIZE - IFLA_PAD - IFLA_XDP ) const ( @@ -118,6 +94,10 @@ const ( IFLA_MACVLAN_UNSPEC = iota IFLA_MACVLAN_MODE IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_MACADDR_MODE + IFLA_MACVLAN_MACADDR + IFLA_MACVLAN_MACADDR_DATA + IFLA_MACVLAN_MACADDR_COUNT IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS ) @@ -129,6 +109,13 @@ const ( MACVLAN_MODE_SOURCE = 16 ) +const ( + MACVLAN_MACADDR_ADD = iota + MACVLAN_MACADDR_DEL + MACVLAN_MACADDR_FLUSH + MACVLAN_MACADDR_SET +) + const ( IFLA_BOND_UNSPEC = iota IFLA_BOND_MODE @@ -475,7 +462,12 @@ const ( IFLA_IPTUN_6RD_RELAY_PREFIX IFLA_IPTUN_6RD_PREFIXLEN IFLA_IPTUN_6RD_RELAY_PREFIXLEN - IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN + IFLA_IPTUN_ENCAP_TYPE + IFLA_IPTUN_ENCAP_FLAGS + IFLA_IPTUN_ENCAP_SPORT + IFLA_IPTUN_ENCAP_DPORT + IFLA_IPTUN_COLLECT_METADATA + IFLA_IPTUN_MAX = 
IFLA_IPTUN_COLLECT_METADATA ) const ( diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 72f7f6af3c8..bc8e82c2cc4 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -13,18 +13,19 @@ import ( "unsafe" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) const ( // Family type definitions - FAMILY_ALL = syscall.AF_UNSPEC - FAMILY_V4 = syscall.AF_INET - FAMILY_V6 = syscall.AF_INET6 + FAMILY_ALL = unix.AF_UNSPEC + FAMILY_V4 = unix.AF_INET + FAMILY_V6 = unix.AF_INET6 FAMILY_MPLS = AF_MPLS ) // SupportedNlFamilies contains the list of netlink families this netlink package supports -var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER} +var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETLINK_NETFILTER} var nextSeqNr uint32 @@ -77,161 +78,161 @@ type NetlinkRequestData interface { // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { - syscall.IfInfomsg + unix.IfInfomsg } // Create an IfInfomsg with family specified func NewIfInfomsg(family int) *IfInfomsg { return &IfInfomsg{ - IfInfomsg: syscall.IfInfomsg{ + IfInfomsg: unix.IfInfomsg{ Family: uint8(family), }, } } func DeserializeIfInfomsg(b []byte) *IfInfomsg { - return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) + return (*IfInfomsg)(unsafe.Pointer(&b[0:unix.SizeofIfInfomsg][0])) } func (msg *IfInfomsg) Serialize() []byte { - return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfInfomsg) Len() int { - return syscall.SizeofIfInfomsg + return unix.SizeofIfInfomsg } func (msg *IfInfomsg) EncapType() string { switch msg.Type { case 0: return "generic" - case syscall.ARPHRD_ETHER: + case unix.ARPHRD_ETHER: return "ether" - case syscall.ARPHRD_EETHER: + case unix.ARPHRD_EETHER: return "eether" - case syscall.ARPHRD_AX25: + case unix.ARPHRD_AX25: return "ax25" - case syscall.ARPHRD_PRONET: + case unix.ARPHRD_PRONET: return "pronet" - case syscall.ARPHRD_CHAOS: + case unix.ARPHRD_CHAOS: return "chaos" - case syscall.ARPHRD_IEEE802: + case unix.ARPHRD_IEEE802: return "ieee802" - case syscall.ARPHRD_ARCNET: + case unix.ARPHRD_ARCNET: return "arcnet" - case syscall.ARPHRD_APPLETLK: + case unix.ARPHRD_APPLETLK: return "atalk" - case syscall.ARPHRD_DLCI: + case unix.ARPHRD_DLCI: return "dlci" - case syscall.ARPHRD_ATM: + case unix.ARPHRD_ATM: return "atm" - case syscall.ARPHRD_METRICOM: + case unix.ARPHRD_METRICOM: return "metricom" - case syscall.ARPHRD_IEEE1394: + case unix.ARPHRD_IEEE1394: return "ieee1394" - case syscall.ARPHRD_INFINIBAND: + case unix.ARPHRD_INFINIBAND: return "infiniband" - case syscall.ARPHRD_SLIP: + case unix.ARPHRD_SLIP: return "slip" - case syscall.ARPHRD_CSLIP: + case unix.ARPHRD_CSLIP: return "cslip" - case syscall.ARPHRD_SLIP6: + case unix.ARPHRD_SLIP6: return "slip6" - case syscall.ARPHRD_CSLIP6: + case unix.ARPHRD_CSLIP6: return "cslip6" - case syscall.ARPHRD_RSRVD: + case unix.ARPHRD_RSRVD: return "rsrvd" - case syscall.ARPHRD_ADAPT: + case unix.ARPHRD_ADAPT: return "adapt" - case syscall.ARPHRD_ROSE: + case unix.ARPHRD_ROSE: return "rose" - case syscall.ARPHRD_X25: + case unix.ARPHRD_X25: return "x25" - case syscall.ARPHRD_HWX25: + case unix.ARPHRD_HWX25: return 
"hwx25" - case syscall.ARPHRD_PPP: + case unix.ARPHRD_PPP: return "ppp" - case syscall.ARPHRD_HDLC: + case unix.ARPHRD_HDLC: return "hdlc" - case syscall.ARPHRD_LAPB: + case unix.ARPHRD_LAPB: return "lapb" - case syscall.ARPHRD_DDCMP: + case unix.ARPHRD_DDCMP: return "ddcmp" - case syscall.ARPHRD_RAWHDLC: + case unix.ARPHRD_RAWHDLC: return "rawhdlc" - case syscall.ARPHRD_TUNNEL: + case unix.ARPHRD_TUNNEL: return "ipip" - case syscall.ARPHRD_TUNNEL6: + case unix.ARPHRD_TUNNEL6: return "tunnel6" - case syscall.ARPHRD_FRAD: + case unix.ARPHRD_FRAD: return "frad" - case syscall.ARPHRD_SKIP: + case unix.ARPHRD_SKIP: return "skip" - case syscall.ARPHRD_LOOPBACK: + case unix.ARPHRD_LOOPBACK: return "loopback" - case syscall.ARPHRD_LOCALTLK: + case unix.ARPHRD_LOCALTLK: return "ltalk" - case syscall.ARPHRD_FDDI: + case unix.ARPHRD_FDDI: return "fddi" - case syscall.ARPHRD_BIF: + case unix.ARPHRD_BIF: return "bif" - case syscall.ARPHRD_SIT: + case unix.ARPHRD_SIT: return "sit" - case syscall.ARPHRD_IPDDP: + case unix.ARPHRD_IPDDP: return "ip/ddp" - case syscall.ARPHRD_IPGRE: + case unix.ARPHRD_IPGRE: return "gre" - case syscall.ARPHRD_PIMREG: + case unix.ARPHRD_PIMREG: return "pimreg" - case syscall.ARPHRD_HIPPI: + case unix.ARPHRD_HIPPI: return "hippi" - case syscall.ARPHRD_ASH: + case unix.ARPHRD_ASH: return "ash" - case syscall.ARPHRD_ECONET: + case unix.ARPHRD_ECONET: return "econet" - case syscall.ARPHRD_IRDA: + case unix.ARPHRD_IRDA: return "irda" - case syscall.ARPHRD_FCPP: + case unix.ARPHRD_FCPP: return "fcpp" - case syscall.ARPHRD_FCAL: + case unix.ARPHRD_FCAL: return "fcal" - case syscall.ARPHRD_FCPL: + case unix.ARPHRD_FCPL: return "fcpl" - case syscall.ARPHRD_FCFABRIC: + case unix.ARPHRD_FCFABRIC: return "fcfb0" - case syscall.ARPHRD_FCFABRIC + 1: + case unix.ARPHRD_FCFABRIC + 1: return "fcfb1" - case syscall.ARPHRD_FCFABRIC + 2: + case unix.ARPHRD_FCFABRIC + 2: return "fcfb2" - case syscall.ARPHRD_FCFABRIC + 3: + case unix.ARPHRD_FCFABRIC + 3: return "fcfb3" - case syscall.ARPHRD_FCFABRIC + 4: + case unix.ARPHRD_FCFABRIC + 4: return "fcfb4" - case syscall.ARPHRD_FCFABRIC + 5: + case unix.ARPHRD_FCFABRIC + 5: return "fcfb5" - case syscall.ARPHRD_FCFABRIC + 6: + case unix.ARPHRD_FCFABRIC + 6: return "fcfb6" - case syscall.ARPHRD_FCFABRIC + 7: + case unix.ARPHRD_FCFABRIC + 7: return "fcfb7" - case syscall.ARPHRD_FCFABRIC + 8: + case unix.ARPHRD_FCFABRIC + 8: return "fcfb8" - case syscall.ARPHRD_FCFABRIC + 9: + case unix.ARPHRD_FCFABRIC + 9: return "fcfb9" - case syscall.ARPHRD_FCFABRIC + 10: + case unix.ARPHRD_FCFABRIC + 10: return "fcfb10" - case syscall.ARPHRD_FCFABRIC + 11: + case unix.ARPHRD_FCFABRIC + 11: return "fcfb11" - case syscall.ARPHRD_FCFABRIC + 12: + case unix.ARPHRD_FCFABRIC + 12: return "fcfb12" - case syscall.ARPHRD_IEEE802_TR: + case unix.ARPHRD_IEEE802_TR: return "tr" - case syscall.ARPHRD_IEEE80211: + case unix.ARPHRD_IEEE80211: return "ieee802.11" - case syscall.ARPHRD_IEEE80211_PRISM: + case unix.ARPHRD_IEEE80211_PRISM: return "ieee802.11/prism" - case syscall.ARPHRD_IEEE80211_RADIOTAP: + case unix.ARPHRD_IEEE80211_RADIOTAP: return "ieee802.11/radiotap" - case syscall.ARPHRD_IEEE802154: + case unix.ARPHRD_IEEE802154: return "ieee802.15.4" case 65534: @@ -243,7 +244,7 @@ func (msg *IfInfomsg) EncapType() string { } func rtaAlignOf(attrlen int) int { - return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) + return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) } func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { @@ -254,7 
+255,7 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { // Extend RtAttr to handle data and children type RtAttr struct { - syscall.RtAttr + unix.RtAttr Data []byte children []NetlinkRequestData } @@ -262,7 +263,7 @@ type RtAttr struct { // Create a new Extended RtAttr object func NewRtAttr(attrType int, data []byte) *RtAttr { return &RtAttr{ - RtAttr: syscall.RtAttr{ + RtAttr: unix.RtAttr{ Type: uint16(attrType), }, children: []NetlinkRequestData{}, @@ -277,16 +278,21 @@ func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { return attr } +// AddChild adds an existing RtAttr as a child. +func (a *RtAttr) AddChild(attr *RtAttr) { + a.children = append(a.children, attr) +} + func (a *RtAttr) Len() int { if len(a.children) == 0 { - return (syscall.SizeofRtAttr + len(a.Data)) + return (unix.SizeofRtAttr + len(a.Data)) } l := 0 for _, child := range a.children { l += rtaAlignOf(child.Len()) } - l += syscall.SizeofRtAttr + l += unix.SizeofRtAttr return rtaAlignOf(l + len(a.Data)) } @@ -319,7 +325,7 @@ func (a *RtAttr) Serialize() []byte { } type NetlinkRequest struct { - syscall.NlMsghdr + unix.NlMsghdr Data []NetlinkRequestData RawData []byte Sockets map[int]*SocketHandle @@ -327,7 +333,7 @@ type NetlinkRequest struct { // Serialize the Netlink Request into a byte array func (req *NetlinkRequest) Serialize() []byte { - length := syscall.SizeofNlMsghdr + length := unix.SizeofNlMsghdr dataBytes := make([][]byte, len(req.Data)) for i, data := range req.Data { dataBytes[i] = data.Serialize() @@ -337,8 +343,8 @@ func (req *NetlinkRequest) Serialize() []byte { req.Len = uint32(length) b := make([]byte, length) - hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] - next := syscall.SizeofNlMsghdr + hdr := (*(*[unix.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := unix.SizeofNlMsghdr copy(b[0:next], hdr) for _, data := range dataBytes { for _, dataByte := range data { @@ -421,10 +427,10 @@ done: if m.Header.Pid != pid { return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } - if m.Header.Type == syscall.NLMSG_DONE { + if m.Header.Type == unix.NLMSG_DONE { break done } - if m.Header.Type == syscall.NLMSG_ERROR { + if m.Header.Type == unix.NLMSG_ERROR { native := NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { @@ -436,7 +442,7 @@ done: continue } res = append(res, m.Data) - if m.Header.Flags&syscall.NLM_F_MULTI == 0 { + if m.Header.Flags&unix.NLM_F_MULTI == 0 { break done } } @@ -449,10 +455,10 @@ done: // the message is serialized func NewNetlinkRequest(proto, flags int) *NetlinkRequest { return &NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.SizeofNlMsghdr), + NlMsghdr: unix.NlMsghdr{ + Len: uint32(unix.SizeofNlMsghdr), Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), + Flags: unix.NLM_F_REQUEST | uint16(flags), Seq: atomic.AddUint32(&nextSeqNr, 1), }, } @@ -460,21 +466,21 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { type NetlinkSocket struct { fd int32 - lsa syscall.SockaddrNetlink + lsa unix.SockaddrNetlink sync.Mutex } func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = syscall.AF_NETLINK - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) + 
s.lsa.Family = unix.AF_NETLINK + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) return nil, err } @@ -551,21 +557,21 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = syscall.AF_NETLINK + s.lsa.Family = unix.AF_NETLINK for _, g := range groups { s.lsa.Groups |= (1 << (g - 1)) } - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) return nil, err } @@ -586,7 +592,7 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne func (s *NetlinkSocket) Close() { fd := int(atomic.SwapInt32(&s.fd, -1)) - syscall.Close(fd) + unix.Close(fd) } func (s *NetlinkSocket) GetFd() int { @@ -598,7 +604,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { if fd < 0 { return fmt.Errorf("Send called on a closed socket") } - if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { + if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { return err } return nil @@ -609,12 +615,12 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { if fd < 0 { return nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, syscall.Getpagesize()) - nr, _, err := syscall.Recvfrom(fd, rb, 0) + rb := make([]byte, unix.Getpagesize()) + nr, _, err := unix.Recvfrom(fd, rb, 0) if err != nil { return nil, err } - if nr < syscall.NLMSG_HDRLEN { + if nr < unix.NLMSG_HDRLEN { return nil, fmt.Errorf("Got short response from netlink") } rb = rb[:nr] @@ -622,27 +628,27 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { } // SetSendTimeout allows to set a send timeout on the socket -func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error { +func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine // remains stuck on a send on a closed fd - return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout) + return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) } // SetReceiveTimeout allows to set a receive timeout on the socket -func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error { +func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine // remains stuck on a recvmsg on a closed fd - return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout) + return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) } func (s *NetlinkSocket) GetPid() (uint32, error) { fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := syscall.Getsockname(fd) + lsa, err := unix.Getsockname(fd) if err != nil { return 0, err } switch v := lsa.(type) { - case *syscall.SockaddrNetlink: + case *unix.SockaddrNetlink: return v.Pid, nil } return 0, fmt.Errorf("Wrong socket type") @@ -697,24 +703,24 @@ func 
Uint64Attr(v uint64) []byte { func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr - for len(b) >= syscall.SizeofRtAttr { + for len(b) >= unix.SizeofRtAttr { a, vbuf, alen, err := netlinkRouteAttrAndValue(b) if err != nil { return nil, err } - ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]} + ra := syscall.NetlinkRouteAttr{Attr: syscall.RtAttr(*a), Value: vbuf[:int(a.Len)-unix.SizeofRtAttr]} attrs = append(attrs, ra) b = b[alen:] } return attrs, nil } -func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) { - a := (*syscall.RtAttr)(unsafe.Pointer(&b[0])) - if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) { - return nil, nil, 0, syscall.EINVAL +func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { + a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) + if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { + return nil, nil, 0, unix.EINVAL } - return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil + return a, b[unix.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil } // SocketHandle contains the netlink socket and the associated diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go index 1a064d65d2f..f6906fcaf7e 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -1,65 +1,66 @@ package nl import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) type RtMsg struct { - syscall.RtMsg + unix.RtMsg } func NewRtMsg() *RtMsg { return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_UNIVERSE, - Protocol: syscall.RTPROT_BOOT, - Type: syscall.RTN_UNICAST, + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_UNIVERSE, + Protocol: unix.RTPROT_BOOT, + Type: unix.RTN_UNICAST, }, } } func NewRtDelMsg() *RtMsg { return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_NOWHERE, + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_NOWHERE, }, } } func (msg *RtMsg) Len() int { - return syscall.SizeofRtMsg + return unix.SizeofRtMsg } func DeserializeRtMsg(b []byte) *RtMsg { - return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) + return (*RtMsg)(unsafe.Pointer(&b[0:unix.SizeofRtMsg][0])) } func (msg *RtMsg) Serialize() []byte { - return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] } type RtNexthop struct { - syscall.RtNexthop + unix.RtNexthop Children []NetlinkRequestData } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0])) + return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) } func (msg *RtNexthop) Len() int { if len(msg.Children) == 0 { - return syscall.SizeofRtNexthop + return unix.SizeofRtNexthop } l := 0 for _, child := range msg.Children { l += rtaAlignOf(child.Len()) } - l += syscall.SizeofRtNexthop + l += unix.SizeofRtNexthop return rtaAlignOf(l) } @@ -67,8 +68,8 @@ func (msg *RtNexthop) Serialize() []byte { length := msg.Len() msg.RtNexthop.Len = uint16(length) buf := make([]byte, length) - copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) - next := rtaAlignOf(syscall.SizeofRtNexthop) + copy(buf, 
(*(*[unix.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(unix.SizeofRtNexthop) if len(msg.Children) > 0 { for _, child := range msg.Children { childBuf := child.Serialize() diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go new file mode 100644 index 00000000000..b3425f6b0ec --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -0,0 +1,111 @@ +package nl + +import ( + "errors" + "fmt" + "net" +) + +type IPv6SrHdr struct { + nextHdr uint8 + hdrLen uint8 + routingType uint8 + segmentsLeft uint8 + firstSegment uint8 + flags uint8 + reserved uint16 + + Segments []net.IP +} + +func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { + if len(s1.Segments) != len(s2.Segments) { + return false + } + for i := range s1.Segments { + if s1.Segments[i].Equal(s2.Segments[i]) != true { + return false + } + } + return s1.nextHdr == s2.nextHdr && + s1.hdrLen == s2.hdrLen && + s1.routingType == s2.routingType && + s1.segmentsLeft == s2.segmentsLeft && + s1.firstSegment == s2.firstSegment && + s1.flags == s2.flags + // reserved doesn't need to be identical. +} + +// seg6 encap mode +const ( + SEG6_IPTUN_MODE_INLINE = iota + SEG6_IPTUN_MODE_ENCAP +) + +// number of nested RTATTR +// from include/uapi/linux/seg6_iptunnel.h +const ( + SEG6_IPTUNNEL_UNSPEC = iota + SEG6_IPTUNNEL_SRH + __SEG6_IPTUNNEL_MAX +) +const ( + SEG6_IPTUNNEL_MAX = __SEG6_IPTUNNEL_MAX - 1 +) + +func EncodeSEG6Encap(mode int, segments []net.IP) ([]byte, error) { + nsegs := len(segments) // nsegs: number of segments + if nsegs == 0 { + return nil, errors.New("EncodeSEG6Encap: No Segment in srh") + } + b := make([]byte, 12, 12+len(segments)*16) + native := NativeEndian() + native.PutUint32(b, uint32(mode)) + b[4] = 0 // srh.nextHdr (0 when calling netlink) + b[5] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) + b[6] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) + b[7] = uint8(nsegs - 1) // srh.segmentsLeft + b[8] = uint8(nsegs - 1) // srh.firstSegment + b[9] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) + // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 + native.PutUint16(b[10:], 0) // srh.reserved + for _, netIP := range segments { + b = append(b, netIP...) 
// srh.Segments + } + return b, nil +} + +func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { + native := NativeEndian() + mode := int(native.Uint32(buf)) + srh := IPv6SrHdr{ + nextHdr: buf[4], + hdrLen: buf[5], + routingType: buf[6], + segmentsLeft: buf[7], + firstSegment: buf[8], + flags: buf[9], + reserved: native.Uint16(buf[10:12]), + } + buf = buf[12:] + if len(buf)%16 != 0 { + err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) + return mode, nil, err + } + for len(buf) > 0 { + srh.Segments = append(srh.Segments, net.IP(buf[:16])) + buf = buf[16:] + } + return mode, srh.Segments, nil +} + +// Helper functions +func SEG6EncapModeString(mode int) string { + switch mode { + case SEG6_IPTUN_MODE_INLINE: + return "inline" + case SEG6_IPTUN_MODE_ENCAP: + return "encap" + } + return "unknown" +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go index 3473e536384..fc631e0e505 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -65,4 +65,14 @@ const ( LWTUNNEL_ENCAP_IP LWTUNNEL_ENCAP_ILA LWTUNNEL_ENCAP_IP6 + LWTUNNEL_ENCAP_SEG6 + LWTUNNEL_ENCAP_BPF +) + +// routing header types +const ( + IPV6_SRCRT_STRICT = 0x01 // Deprecated; will be removed + IPV6_SRCRT_TYPE_0 = 0 // Deprecated; will be removed + IPV6_SRCRT_TYPE_2 = 2 // IPv6 type 2 Routing Header + IPV6_SRCRT_TYPE_4 = 4 // Segment Routing with IPv6 ) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index e91fb21c558..94ebc290a9e 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -673,3 +673,38 @@ const ( TCA_FW_MASK TCA_FW_MAX = TCA_FW_MASK ) + +const ( + TCA_MATCHALL_UNSPEC = iota + TCA_MATCHALL_CLASSID + TCA_MATCHALL_ACT + TCA_MATCHALL_FLAGS +) + +const ( + TCA_FQ_UNSPEC = iota + TCA_FQ_PLIMIT // limit of total number of packets in queue + TCA_FQ_FLOW_PLIMIT // limit of packets per flow + TCA_FQ_QUANTUM // RR quantum + TCA_FQ_INITIAL_QUANTUM // RR quantum for new flow + TCA_FQ_RATE_ENABLE // enable/disable rate limiting + TCA_FQ_FLOW_DEFAULT_RATE // obsolete do not use + TCA_FQ_FLOW_MAX_RATE // per flow max rate + TCA_FQ_BUCKETS_LOG // log2(number of buckets) + TCA_FQ_FLOW_REFILL_DELAY // flow credit refill delay in usec + TCA_FQ_ORPHAN_MASK // mask applied to orphaned skb hashes + TCA_FQ_LOW_RATE_THRESHOLD // per packet delay under this rate +) + +const ( + TCA_FQ_CODEL_UNSPEC = iota + TCA_FQ_CODEL_TARGET + TCA_FQ_CODEL_LIMIT + TCA_FQ_CODEL_INTERVAL + TCA_FQ_CODEL_ECN + TCA_FQ_CODEL_FLOWS + TCA_FQ_CODEL_QUANTUM + TCA_FQ_CODEL_CE_THRESHOLD + TCA_FQ_CODEL_DROP_BATCH_SIZE + TCA_FQ_CODEL_MEMORY_LIMIT +) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 10dd0d53357..43c465f0575 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func LinkGetProtinfo(link Link) (Protinfo, error) { @@ -15,10 +16,10 @@ func (h *Handle) 
LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) var pi Protinfo - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return pi, err } @@ -33,7 +34,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { return pi, err } for _, attr := range attrs { - if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { + if attr.Attr.Type != unix.IFLA_PROTINFO|unix.NLA_F_NESTED { continue } infos, err := nl.ParseRouteAttr(attr.Value) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc.go index 0ca86ebe89f..3df4b5c291c 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc.go @@ -230,3 +230,63 @@ func (qdisc *GenericQdisc) Attrs() *QdiscAttrs { func (qdisc *GenericQdisc) Type() string { return qdisc.QdiscType } + +// Fq is a classless packet scheduler meant to be mostly used for locally generated traffic. +type Fq struct { + QdiscAttrs + PacketLimit uint32 + FlowPacketLimit uint32 + // In bytes + Quantum uint32 + InitialQuantum uint32 + // called RateEnable under the hood + Pacing uint32 + FlowDefaultRate uint32 + FlowMaxRate uint32 + // called BucketsLog under the hood + Buckets uint32 + FlowRefillDelay uint32 + LowRateThreshold uint32 +} + +func NewFq(attrs QdiscAttrs) *Fq { + return &Fq{ + QdiscAttrs: attrs, + Pacing: 1, + } +} + +func (qdisc *Fq) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Fq) Type() string { + return "fq" +} + +// FQ_Codel (Fair Queuing Controlled Delay) is queuing discipline that combines Fair Queuing with the CoDel AQM scheme. +type FqCodel struct { + QdiscAttrs + Target uint32 + Limit uint32 + Interval uint32 + ECN uint32 + Flows uint32 + Quantum uint32 + // There are some more attributes here, but support for them seems not ubiquitous +} + +func NewFqCodel(attrs QdiscAttrs) *FqCodel { + return &FqCodel{ + QdiscAttrs: attrs, + ECN: 1, + } +} + +func (qdisc *FqCodel) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *FqCodel) Type() string { + return "fq_codel" +} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 1123396e47d..3794ac18a83 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -8,6 +8,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // NOTE function is here because it uses other linux functions @@ -84,7 +85,7 @@ func QdiscDel(qdisc Qdisc) error { // QdiscDel will delete a qdisc from the system. // Equivalent to: `tc qdisc del $qdisc` func (h *Handle) QdiscDel(qdisc Qdisc) error { - return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc) + return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc) } // QdiscChange will change a qdisc in place @@ -98,7 +99,7 @@ func QdiscChange(qdisc Qdisc) error { // Equivalent to: `tc qdisc change $qdisc` // The parent and handle MUST NOT be changed. 
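As a rough illustration of the Fq/FqCodel qdisc support added to the vendored netlink package above (this sketch is not part of the vendored patch), a consumer might attach an fq qdisc to a link roughly like this; the interface name "eth0" and the quantum value are placeholders.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// Look up the link the qdisc should be attached to ("eth0" is a placeholder).
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}

	// NewFq enables pacing by default (Pacing: 1), mirroring the constructor added above.
	fq := netlink.NewFq(netlink.QdiscAttrs{
		LinkIndex: link.Attrs().Index,
		Handle:    netlink.MakeHandle(1, 0),
		Parent:    netlink.HANDLE_ROOT,
	})
	fq.Quantum = 9000 // bytes; illustrative value only

	// Roughly equivalent to: `tc qdisc add dev eth0 root handle 1: fq quantum 9000`
	if err := netlink.QdiscAdd(fq); err != nil {
		log.Fatal(err)
	}
}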
func (h *Handle) QdiscChange(qdisc Qdisc) error { - return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc) + return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc) } // QdiscReplace will replace a qdisc to the system. @@ -113,8 +114,8 @@ func QdiscReplace(qdisc Qdisc) error { // The handle MUST change. func (h *Handle) QdiscReplace(qdisc Qdisc) error { return h.qdiscModify( - syscall.RTM_NEWQDISC, - syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE, + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_REPLACE, qdisc) } @@ -128,13 +129,13 @@ func QdiscAdd(qdisc Qdisc) error { // Equivalent to: `tc qdisc add $qdisc` func (h *Handle) QdiscAdd(qdisc Qdisc) error { return h.qdiscModify( - syscall.RTM_NEWQDISC, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, qdisc) } func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { - req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) base := qdisc.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -145,13 +146,13 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { req.AddData(msg) // When deleting don't bother building the rest of the netlink payload - if cmd != syscall.RTM_DELQDISC { + if cmd != unix.RTM_DELQDISC { if err := qdiscPayload(req, qdisc); err != nil { return err } } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -231,6 +232,48 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.Attrs().Parent != HANDLE_INGRESS { return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") } + case *FqCodel: + nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN)))) + if qdisc.Limit > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit)))) + } + if qdisc.Interval > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval)))) + } + if qdisc.Flows > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows)))) + } + if qdisc.Quantum > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + } + + case *Fq: + nl.NewRtAttrChild(options, nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing)))) + + if qdisc.Buckets > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) + } + if qdisc.LowRateThreshold > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) + } + if qdisc.Quantum > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + } + if qdisc.InitialQuantum > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_INITIAL_QUANTUM, nl.Uint32Attr((uint32(qdisc.InitialQuantum)))) + } + if qdisc.FlowRefillDelay > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay)))) + } + if qdisc.FlowPacketLimit > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit)))) + } + if qdisc.FlowMaxRate > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate)))) + } + if qdisc.FlowDefaultRate > 0 { + nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) + } } req.AddData(options) @@ -248,7 +291,7 @@ func QdiscList(link Link) ([]Qdisc, error) { // Equivalent to: `tc 
qdisc show`. // The list can be filtered by link. func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { - req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) index := int32(0) if link != nil { base := link.Attrs() @@ -261,7 +304,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) if err != nil { return nil, err } @@ -303,6 +346,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { qdisc = &Ingress{} case "htb": qdisc = &Htb{} + case "fq": + qdisc = &Fq{} + case "fq_codel": + qdisc = &FqCodel{} case "netem": qdisc = &Netem{} default: @@ -336,6 +383,22 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { if err := parseHtbData(qdisc, data); err != nil { return nil, err } + case "fq": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseFqData(qdisc, data); err != nil { + return nil, err + } + case "fq_codel": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseFqCodelData(qdisc, data); err != nil { + return nil, err + } case "netem": if err := parseNetemData(qdisc, attr.Value); err != nil { return nil, err @@ -388,6 +451,61 @@ func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { return nil } +func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + fqCodel := qdisc.(*FqCodel) + for _, datum := range data { + + switch datum.Attr.Type { + case nl.TCA_FQ_CODEL_TARGET: + fqCodel.Target = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_LIMIT: + fqCodel.Limit = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_INTERVAL: + fqCodel.Interval = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_ECN: + fqCodel.ECN = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_FLOWS: + fqCodel.Flows = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_QUANTUM: + fqCodel.Quantum = native.Uint32(datum.Value) + } + } + return nil +} + +func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + fq := qdisc.(*Fq) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_FQ_BUCKETS_LOG: + fq.Buckets = native.Uint32(datum.Value) + case nl.TCA_FQ_LOW_RATE_THRESHOLD: + fq.LowRateThreshold = native.Uint32(datum.Value) + case nl.TCA_FQ_QUANTUM: + fq.Quantum = native.Uint32(datum.Value) + case nl.TCA_FQ_RATE_ENABLE: + fq.Pacing = native.Uint32(datum.Value) + case nl.TCA_FQ_INITIAL_QUANTUM: + fq.InitialQuantum = native.Uint32(datum.Value) + case nl.TCA_FQ_ORPHAN_MASK: + // TODO + case nl.TCA_FQ_FLOW_REFILL_DELAY: + fq.FlowRefillDelay = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_PLIMIT: + fq.FlowPacketLimit = native.Uint32(datum.Value) + case nl.TCA_FQ_PLIMIT: + fq.PacketLimit = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_MAX_RATE: + fq.FlowMaxRate = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_DEFAULT_RATE: + fq.FlowDefaultRate = native.Uint32(datum.Value) + } + } + return nil +} + func parseNetemData(qdisc Qdisc, value []byte) error { netem := qdisc.(*Netem) opt := nl.DeserializeTcNetemQopt(value) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go index 68c6a2230d2..2cd58ee3342 100644 --- 
a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go @@ -45,6 +45,8 @@ type Route struct { MPLSDst *int NewDst Destination Encap Encap + MTU int + AdvMSS int } func (r Route) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go index 9234c6986da..3f856711f3c 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go @@ -8,16 +8,17 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // RtAttr is shared so it is in netlink_linux.go const ( - SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE - SCOPE_SITE Scope = syscall.RT_SCOPE_SITE - SCOPE_LINK Scope = syscall.RT_SCOPE_LINK - SCOPE_HOST Scope = syscall.RT_SCOPE_HOST - SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE + SCOPE_UNIVERSE Scope = unix.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = unix.RT_SCOPE_SITE + SCOPE_LINK Scope = unix.RT_SCOPE_LINK + SCOPE_HOST Scope = unix.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE ) const ( @@ -34,8 +35,8 @@ const ( ) const ( - FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK - FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE + FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE ) var testFlags = []flagString{ @@ -124,17 +125,17 @@ func (e *MPLSEncap) Type() int { func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { - return fmt.Errorf("Lack of bytes") + return fmt.Errorf("lack of bytes") } native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { - return fmt.Errorf("Lack of bytes") + return fmt.Errorf("lack of bytes") } buf = buf[:l] typ := native.Uint16(buf[2:]) if typ != nl.MPLS_IPTUNNEL_DST { - return fmt.Errorf("Unknown MPLS Encap Type: %d", typ) + return fmt.Errorf("unknown MPLS Encap Type: %d", typ) } e.Labels = nl.DecodeMPLSStack(buf[4:]) return nil @@ -185,6 +186,79 @@ func (e *MPLSEncap) Equal(x Encap) bool { return true } +// SEG6 definitions +type SEG6Encap struct { + Mode int + Segments []net.IP +} + +func (e *SEG6Encap) Type() int { + return nl.LWTUNNEL_ENCAP_SEG6 +} +func (e *SEG6Encap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lack of bytes") + } + native := nl.NativeEndian() + // Get Length(l) & Type(typ) : 2 + 2 bytes + l := native.Uint16(buf) + if len(buf) < int(l) { + return fmt.Errorf("lack of bytes") + } + buf = buf[:l] // make sure buf size upper limit is Length + typ := native.Uint16(buf[2:]) + if typ != nl.SEG6_IPTUNNEL_SRH { + return fmt.Errorf("unknown SEG6 Type: %d", typ) + } + + var err error + e.Mode, e.Segments, err = nl.DecodeSEG6Encap(buf[4:]) + + return err +} +func (e *SEG6Encap) Encode() ([]byte, error) { + s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments) + native := nl.NativeEndian() + hdr := make([]byte, 4) + native.PutUint16(hdr, uint16(len(s)+4)) + native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) + return append(hdr, s...), err +} +func (e *SEG6Encap) String() string { + segs := make([]string, 0, len(e.Segments)) + // append segment backwards (from n to 0) since seg#0 is the last segment. 
+ for i := len(e.Segments); i > 0; i-- { + segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + } + str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), + len(e.Segments), strings.Join(segs, " ")) + return str +} +func (e *SEG6Encap) Equal(x Encap) bool { + o, ok := x.(*SEG6Encap) + if !ok { + return false + } + if e == o { + return true + } + if e == nil || o == nil { + return false + } + if e.Mode != o.Mode { + return false + } + if len(e.Segments) != len(o.Segments) { + return false + } + for i := range e.Segments { + if !e.Segments[i].Equal(o.Segments[i]) { + return false + } + } + return true +} + // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -194,8 +268,8 @@ func RouteAdd(route *Route) error { // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func (h *Handle) RouteAdd(route *Route) error { - flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK - req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -208,8 +282,8 @@ func RouteReplace(route *Route) error { // RouteReplace will add a route to the system. // Equivalent to: `ip route replace $route` func (h *Handle) RouteReplace(route *Route) error { - flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK - req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -222,7 +296,7 @@ func RouteDel(route *Route) error { // RouteDel will delete a route from the system. 
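A minimal sketch of how the SEG6Encap type introduced above might be used with RouteAdd, assuming the route Encap attribute is serialized as for the existing MPLS encap path; it is not part of the vendored patch, and the interface name, prefix, and segment address are placeholders (the kernel must also support seg6 encapsulation).

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	_, dst, err := net.ParseCIDR("2001:db8:1::/64") // placeholder destination prefix
	if err != nil {
		log.Fatal(err)
	}

	link, err := netlink.LinkByName("eth0") // placeholder interface
	if err != nil {
		log.Fatal(err)
	}

	// Encapsulate matching traffic in an outer IPv6 header carrying an SRH with a
	// single segment, roughly `ip -6 route add ... encap seg6 mode encap segs ...`.
	route := &netlink.Route{
		LinkIndex: link.Attrs().Index,
		Dst:       dst,
		Encap: &netlink.SEG6Encap{
			Mode:     nl.SEG6_IPTUN_MODE_ENCAP,
			Segments: []net.IP{net.ParseIP("2001:db8:2::1")},
		},
	}
	if err := netlink.RouteAdd(route); err != nil {
		log.Fatal(err)
	}
}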
// Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { - req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) return h.routeHandle(route, req, nl.NewRtDelMsg()) } @@ -245,12 +319,12 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { dstData = route.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) } else if route.MPLSDst != nil { family = nl.FAMILY_MPLS msg.Dst_len = uint8(20) - msg.Type = syscall.RTN_UNICAST - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + msg.Type = unix.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) } if route.NewDst != nil { @@ -288,7 +362,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg srcData = route.Src.To16() } // The commonly used src ip for routes is actually PREFSRC - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PREFSRC, srcData)) } if route.Gw != nil { @@ -303,14 +377,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { gwData = route.Gw.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) } if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { rtnh := &nl.RtNexthop{ - RtNexthop: syscall.RtNexthop{ + RtNexthop: unix.RtNexthop{ Hops: uint8(nh.Hops), Ifindex: int32(nh.LinkIndex), Flags: uint8(nh.Flags), @@ -323,9 +397,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg return fmt.Errorf("gateway, source, and destination ip are not the same IP family") } if gwFamily == FAMILY_V4 { - children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4()))) + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) } else { - children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16()))) + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To16()))) } } if nh.NewDst != nil { @@ -351,15 +425,15 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtnh.Children = children buf = append(buf, rtnh.Serialize()...) 
} - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_MULTIPATH, buf)) } if route.Table > 0 { if route.Table >= 256 { - msg.Table = syscall.RT_TABLE_UNSPEC + msg.Table = unix.RT_TABLE_UNSPEC b := make([]byte, 4) native.PutUint32(b, uint32(route.Table)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_TABLE, b)) } else { msg.Table = uint8(route.Table) } @@ -368,7 +442,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Priority > 0 { b := make([]byte, 4) native.PutUint32(b, uint32(route.Priority)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) } if route.Tos > 0 { msg.Tos = uint8(route.Tos) @@ -380,6 +454,25 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Type = uint8(route.Type) } + var metrics []*nl.RtAttr + // TODO: support other rta_metric values + if route.MTU > 0 { + b := nl.Uint32Attr(uint32(route.MTU)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) + } + if route.AdvMSS > 0 { + b := nl.Uint32Attr(uint32(route.AdvMSS)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) + } + + if metrics != nil { + attr := nl.NewRtAttr(unix.RTA_METRICS, nil) + for _, metric := range metrics { + attr.AddChild(metric) + } + rtAttrs = append(rtAttrs, attr) + } + msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) msg.Family = uint8(family) @@ -394,9 +487,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg ) native.PutUint32(b, uint32(route.LinkIndex)) - req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -429,11 +522,11 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. 
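The new Route.MTU and Route.AdvMSS fields above are encoded as nested RTA_METRICS attributes (RTAX_MTU / RTAX_ADVMSS). A short sketch of how a caller might set them follows; it is not part of the vendored patch, and the interface name, prefix, gateway, and sizes are placeholders.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	_, dst, err := net.ParseCIDR("192.0.2.0/24") // placeholder prefix
	if err != nil {
		log.Fatal(err)
	}

	link, err := netlink.LinkByName("eth0") // placeholder interface
	if err != nil {
		log.Fatal(err)
	}

	// Roughly `ip route add 192.0.2.0/24 via 10.0.0.1 dev eth0 mtu 1400 advmss 1360`.
	route := &netlink.Route{
		LinkIndex: link.Attrs().Index,
		Dst:       dst,
		Gw:        net.ParseIP("10.0.0.1"),
		MTU:       1400,
		AdvMSS:    1360,
	}
	if err := netlink.RouteAdd(route); err != nil {
		log.Fatal(err)
	}
}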
// All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) infmsg := nl.NewIfInfomsg(family) req.AddData(infmsg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err } @@ -441,11 +534,11 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) var res []Route for _, m := range msgs { msg := nl.DeserializeRtMsg(m) - if msg.Flags&syscall.RTM_F_CLONED != 0 { + if msg.Flags&unix.RTM_F_CLONED != 0 { // Ignore cloned routes continue } - if msg.Table != syscall.RT_TABLE_MAIN { + if msg.Table != unix.RT_TABLE_MAIN { if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables continue @@ -457,7 +550,7 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) } if filter != nil { switch { - case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && route.Table != filter.Table: + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: continue case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: continue @@ -508,11 +601,11 @@ func deserializeRoute(m []byte) (Route, error) { var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case syscall.RTA_GATEWAY: + case unix.RTA_GATEWAY: route.Gw = net.IP(attr.Value) - case syscall.RTA_PREFSRC: + case unix.RTA_PREFSRC: route.Src = net.IP(attr.Value) - case syscall.RTA_DST: + case unix.RTA_DST: if msg.Family == nl.FAMILY_MPLS { stack := nl.DecodeMPLSStack(attr.Value) if len(stack) == 0 || len(stack) > 1 { @@ -525,36 +618,36 @@ func deserializeRoute(m []byte) (Route, error) { Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), } } - case syscall.RTA_OIF: + case unix.RTA_OIF: route.LinkIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_IIF: + case unix.RTA_IIF: route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_PRIORITY: + case unix.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_TABLE: + case unix.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_MULTIPATH: + case unix.RTA_MULTIPATH: parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { - if len(value) < syscall.SizeofRtNexthop { - return nil, nil, fmt.Errorf("Lack of bytes") + if len(value) < unix.SizeofRtNexthop { + return nil, nil, fmt.Errorf("lack of bytes") } nh := nl.DeserializeRtNexthop(value) if len(value) < int(nh.RtNexthop.Len) { - return nil, nil, fmt.Errorf("Lack of bytes") + return nil, nil, fmt.Errorf("lack of bytes") } info := &NexthopInfo{ LinkIndex: int(nh.RtNexthop.Ifindex), Hops: int(nh.RtNexthop.Hops), Flags: int(nh.RtNexthop.Flags), } - attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + attrs, err := nl.ParseRouteAttr(value[unix.SizeofRtNexthop:int(nh.RtNexthop.Len)]) if err != nil { return nil, nil, err } var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case syscall.RTA_GATEWAY: + case unix.RTA_GATEWAY: info.Gw = net.IP(attr.Value) case nl.RTA_NEWDST: var d Destination @@ -611,6 +704,19 @@ func 
deserializeRoute(m []byte) (Route, error) { encapType = attr case nl.RTA_ENCAP: encap = attr + case unix.RTA_METRICS: + metrics, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return route, err + } + for _, metric := range metrics { + switch metric.Attr.Type { + case unix.RTAX_MTU: + route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_ADVMSS: + route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + } + } } } @@ -623,6 +729,11 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } + case nl.LWTUNNEL_ENCAP_SEG6: + e = &SEG6Encap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } } route.Encap = e } @@ -639,7 +750,7 @@ func RouteGet(destination net.IP) ([]Route, error) { // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { - req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte var bitlen uint8 @@ -655,10 +766,10 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { msg.Dst_len = bitlen req.AddData(msg) - rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) + rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) req.AddData(rtaDst) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err } @@ -678,13 +789,13 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { // RouteSubscribe takes a chan down which notifications will be sent // when routes are added or deleted. Close the 'done' chan to stop subscription. func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil) + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) } // RouteSubscribeAt works like RouteSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(ns, netns.None(), ch, done, nil) + return routeSubscribeAt(ns, netns.None(), ch, done, nil, false) } // RouteSubscribeOptions contains a set of options to use with @@ -692,6 +803,7 @@ func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan stru type RouteSubscribeOptions struct { Namespace *netns.NsHandle ErrorCallback func(error) + ListExisting bool } // RouteSubscribeWithOptions work like RouteSubscribe but enable to @@ -702,11 +814,11 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti none := netns.None() options.Namespace = &none } - return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) + return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) } -func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) +func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) if err != nil { return err } @@ -716,6 +828,15 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < s.Close() }() } + if listExisting { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETROUTE, + unix.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(infmsg) + if err := s.Send(req); err != nil { + return err + } + } go func() { defer close(ch) for { @@ -727,6 +848,20 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < return } for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + native := nl.NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(syscall.Errno(-error)) + } + return + } route, err := deserializeRoute(m.Data) if err != nil { if cberr != nil { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go index e4d9168d6c0..7fc8ae5df15 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go @@ -21,6 +21,7 @@ type Rule struct { OifName string SuppressIfgroup int SuppressPrefixlen int + Invert bool } func (r Rule) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go index cbd91a56bb3..6238ae45864 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -3,11 +3,13 @@ package netlink import ( "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) +const FibRuleInvert = 0x2 + // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func RuleAdd(rule *Rule) error { @@ -17,7 +19,7 @@ func RuleAdd(rule *Rule) error { // RuleAdd adds a rule to the system. 
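The ListExisting option added to RouteSubscribeOptions above makes the subscription first dump the current routing table before streaming updates. A brief sketch of a subscriber using it (not part of the vendored patch):

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	ch := make(chan netlink.RouteUpdate)
	done := make(chan struct{})
	defer close(done)

	// ListExisting asks the kernel for an RTM_GETROUTE dump first, then the channel
	// continues to receive RTM_NEWROUTE / RTM_DELROUTE notifications.
	opts := netlink.RouteSubscribeOptions{ListExisting: true}
	if err := netlink.RouteSubscribeWithOptions(ch, done, opts); err != nil {
		log.Fatal(err)
	}

	for update := range ch {
		log.Printf("type=%d route=%v", update.Type, update.Route)
	}
}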
// Equivalent to: ip rule add func (h *Handle) RuleAdd(rule *Rule) error { - req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWRULE, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return ruleHandle(rule, req) } @@ -30,18 +32,31 @@ func RuleDel(rule *Rule) error { // RuleDel deletes a rule from the system. // Equivalent to: ip rule del func (h *Handle) RuleDel(rule *Rule) error { - req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELRULE, unix.NLM_F_ACK) return ruleHandle(rule, req) } func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg := nl.NewRtMsg() - msg.Family = syscall.AF_INET + msg.Family = unix.AF_INET + msg.Protocol = unix.RTPROT_BOOT + msg.Scope = unix.RT_SCOPE_UNIVERSE + msg.Table = unix.RT_TABLE_UNSPEC + msg.Type = unix.RTN_UNSPEC + if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { + msg.Type = unix.RTN_UNICAST + } + if rule.Invert { + msg.Flags |= FibRuleInvert + } if rule.Family != 0 { msg.Family = uint8(rule.Family) } - var dstFamily uint8 + if rule.Table >= 0 && rule.Table < 256 { + msg.Table = uint8(rule.Table) + } + var dstFamily uint8 var rtAttrs []*nl.RtAttr if rule.Dst != nil && rule.Dst.IP != nil { dstLen, _ := rule.Dst.Mask.Size() @@ -49,12 +64,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) dstFamily = msg.Family var dstData []byte - if msg.Family == syscall.AF_INET { + if msg.Family == unix.AF_INET { dstData = rule.Dst.IP.To4() } else { dstData = rule.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) } if rule.Src != nil && rule.Src.IP != nil { @@ -65,19 +80,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { srcLen, _ := rule.Src.Mask.Size() msg.Src_len = uint8(srcLen) var srcData []byte - if msg.Family == syscall.AF_INET { + if msg.Family == unix.AF_INET { srcData = rule.Src.IP.To4() } else { srcData = rule.Src.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData)) - } - - if rule.Table >= 0 { - msg.Table = uint8(rule.Table) - if rule.Table >= 256 { - msg.Table = syscall.RT_TABLE_UNSPEC - } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_SRC, srcData)) } req.AddData(msg) @@ -142,7 +150,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -155,11 +163,11 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. 
// Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { - req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST) + req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) if err != nil { return nil, err } @@ -175,9 +183,11 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule := NewRule() + rule.Invert = msg.Flags&FibRuleInvert > 0 + for j := range attrs { switch attrs[j].Attr.Type { - case syscall.RTA_TABLE: + case unix.RTA_TABLE: rule.Table = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_SRC: rule.Src = &net.IPNet{ diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go index b42b84f0cfe..99e9fb4d897 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,9 +4,9 @@ import ( "errors" "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) const ( @@ -123,15 +123,15 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } - s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG) + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) if err != nil { return nil, err } defer s.Close() req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) req.AddData(&socketRequest{ - Family: syscall.AF_INET, - Protocol: syscall.IPPROTO_TCP, + Family: unix.AF_INET, + Protocol: unix.IPPROTO_TCP, ID: SocketID{ SourcePort: uint16(localTCP.Port), DestinationPort: uint16(remoteTCP.Port), diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go index 9962dcf7006..02b41842e10 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go @@ -2,19 +2,20 @@ package netlink import ( "fmt" - "syscall" + + "golang.org/x/sys/unix" ) // Proto is an enum representing an ipsec protocol. 
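The Rule.Invert flag added above maps to FIB_RULE_INVERT, i.e. `ip rule add not ...`. A small sketch of its use (not part of the vendored patch; the source prefix and table number are placeholders):

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	// Roughly `ip rule add not from 10.0.0.0/8 table 100`: packets that do NOT
	// match the source prefix are looked up in table 100.
	rule := netlink.NewRule()
	rule.Invert = true
	rule.Src = &net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
	rule.Table = 100

	if err := netlink.RuleAdd(rule); err != nil {
		log.Fatal(err)
	}
}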
type Proto uint8 const ( - XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING - XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP - XFRM_PROTO_AH Proto = syscall.IPPROTO_AH - XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS + XFRM_PROTO_ROUTE2 Proto = unix.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP + XFRM_PROTO_AH Proto = unix.IPPROTO_AH + XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin - XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW + XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW ) func (p Proto) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index 7b98c9cb6d3..efe72ddf29c 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -2,11 +2,10 @@ package netlink import ( "fmt" - "syscall" - - "github.com/vishvananda/netns" "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) type XfrmMsg interface { @@ -39,7 +38,7 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error if err != nil { return nil } - s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...) + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_XFRM, groups...) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index c3d4e422272..fde0c2ca5ad 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,9 +1,8 @@ package netlink import ( - "syscall" - "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { @@ -55,7 +54,7 @@ func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error { } func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := &nl.XfrmUserpolicyInfo{} selFromPolicy(&msg.Sel, policy) @@ -91,7 +90,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -121,12 +120,12 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) if err != nil { return nil, err } @@ -165,13 +164,13 @@ func XfrmPolicyFlush() error { // XfrmPolicyFlush will flush the policies on the system. 
// Equivalent to: `ip xfrm policy flush` func (h *Handle) XfrmPolicyFlush() error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK) - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, unix.NLM_F_ACK) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) msg := &nl.XfrmUserpolicyId{} selFromPolicy(&msg.Sel, policy) @@ -189,7 +188,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo resType = 0 } - msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go index 368a9b986d6..d14740dc55b 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -3,6 +3,7 @@ package netlink import ( "fmt" "net" + "time" ) // XfrmStateAlgo represents the algorithm to use for the ipsec encryption. @@ -67,6 +68,19 @@ type XfrmStateLimits struct { TimeUseHard uint64 } +// XfrmStateStats represents the current number of bytes/packets +// processed by this State, the State's installation and first use +// time and the replay window counters. +type XfrmStateStats struct { + ReplayWindow uint32 + Replay uint32 + Failed uint32 + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + // XfrmState represents the state of an ipsec policy. It optionally // contains an XfrmStateAlgo for encryption and one for authentication. 
type XfrmState struct { @@ -78,6 +92,7 @@ type XfrmState struct { Reqid int ReplayWindow int Limits XfrmStateLimits + Statistics XfrmStateStats Mark *XfrmMark Auth *XfrmStateAlgo Crypt *XfrmStateAlgo @@ -94,10 +109,16 @@ func (sa XfrmState) Print(stats bool) string { if !stats { return sa.String() } - - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d", + at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) + ut := "-" + if sa.Statistics.UseTime > 0 { + ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) + } + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ + "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard) + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, + sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) } func printLimit(lmt uint64) string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 6a7bc0deca2..5dfdb33e449 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,10 +2,10 @@ package netlink import ( "fmt" - "syscall" "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func writeStateAlgo(a *XfrmStateAlgo) []byte { @@ -69,8 +69,10 @@ func writeReplayEsn(replayWindow int) []byte { ReplayWindow: uint32(replayWindow), } - // taken from iproute2/ip/xfrm_state.c: - replayEsn.BmpLen = uint32((replayWindow + (4 * 8) - 1) / (4 * 8)) + // Linux stores the bitmap to identify the already received sequence packets in blocks of uint32 elements. + // Therefore bitmap length is the minimum number of uint32 elements needed. The following is a ceiling operation. 
+ bytesPerElem := int(unsafe.Sizeof(replayEsn.BmpLen)) // Any uint32 variable is good for this + replayEsn.BmpLen = uint32((replayWindow + (bytesPerElem * 8) - 1) / (bytesPerElem * 8)) return replayEsn.Serialize() } @@ -111,7 +113,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { if state.Spi == 0 { return fmt.Errorf("Spi must be set when adding xfrm state.") } - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := xfrmUsersaInfoFromXfrmState(state) @@ -157,13 +159,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := &nl.XfrmUserSpiInfo{} msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) @@ -177,7 +179,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req.AddData(out) } - msgs, err := req.Execute(syscall.NETLINK_XFRM, 0) + msgs, err := req.Execute(unix.NETLINK_XFRM, 0) if err != nil { return nil, err } @@ -216,9 +218,9 @@ func XfrmStateList(family int) ([]XfrmState, error) { // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) - msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) if err != nil { return nil, err } @@ -255,7 +257,7 @@ func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) { } func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) msg := &nl.XfrmUsersaId{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) @@ -278,7 +280,7 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState resType = 0 } - msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } @@ -308,6 +310,7 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.Reqid = int(msg.Reqid) state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) + curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) return &state } @@ -386,11 +389,11 @@ func XfrmStateFlush(proto Proto) error { // proto = 0 means any transformation protocols // Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` func (h *Handle) XfrmStateFlush(proto Proto) error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, unix.NLM_F_ACK) req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) if err != nil { return err } @@ -429,6 +432,16 @@ func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) } +func 
curToStats(cur *nl.XfrmLifetimeCur, wstats *nl.XfrmStats, stats *XfrmStateStats) { + stats.Bytes = cur.Bytes + stats.Packets = cur.Packets + stats.AddTime = cur.AddTime + stats.UseTime = cur.UseTime + stats.ReplayWindow = wstats.ReplayWindow + stats.Replay = wstats.Replay + stats.Failed = wstats.IntegrityFailed +} + func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg := &nl.XfrmUsersaInfo{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go index 70a2b535945..03767fc353b 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go @@ -906,6 +906,12 @@ func (f *Finder) DefaultFolder(ctx context.Context) (*object.Folder, error) { } folder := object.NewFolder(f.client, ref.Reference()) + // Set the InventoryPath of the newly created folder object + // The default foler becomes the datacenter's "vm" folder. + // The "vm" folder always exists for a datacenter. It cannot be + // removed or replaced + folder.SetInventoryPath(path.Join(f.dc.InventoryPath, "vm")) + return folder, nil } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go index b2dd3ac8341..90b9fbe16f6 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -169,6 +169,7 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { client := NewClient(u, c.k) client.Namespace = "urn:" + namespace + client.Transport.(*http.Transport).TLSClientConfig = c.Transport.(*http.Transport).TLSClientConfig if cert := c.Certificate(); cert != nil { client.SetCertificate(*cert) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/LICENSE b/cluster-autoscaler/vendor/go.etcd.io/etcd/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/LICENSE rename to cluster-autoscaler/vendor/go.etcd.io/etcd/LICENSE diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/NOTICE b/cluster-autoscaler/vendor/go.etcd.io/etcd/NOTICE similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/NOTICE rename to cluster-autoscaler/vendor/go.etcd.io/etcd/NOTICE diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go similarity index 60% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go index c5faf00c647..7e038df0146 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go @@ -1,16 +1,30 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: auth.proto +/* + Package authpb is a generated protocol buffer package. 
+ + It is generated from these files: + auth.proto + + It has these top-level messages: + UserAddOptions + User + Permission + Role +*/ package authpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -37,7 +51,6 @@ var Permission_Type_name = map[int32]string{ 1: "WRITE", 2: "READWRITE", } - var Permission_Type_value = map[string]int32{ "READ": 0, "WRITE": 1, @@ -47,174 +60,92 @@ var Permission_Type_value = map[string]int32{ func (x Permission_Type) String() string { return proto.EnumName(Permission_Type_name, int32(x)) } +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2, 0} } -func (Permission_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1, 0} +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` } +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + // User is a single entry in the bucket authUsers type User struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` } -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{0} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_User.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(m, src) -} -func (m *User) XXX_Size() int { - return m.Size() -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } // Permission is a single entity type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd 
[]byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func (*Permission) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1} -} -func (m *Permission) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Permission.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Permission) XXX_Merge(src proto.Message) { - xxx_messageInfo_Permission.Merge(m, src) -} -func (m *Permission) XXX_Size() int { - return m.Size() -} -func (m *Permission) XXX_DiscardUnknown() { - xxx_messageInfo_Permission.DiscardUnknown(m) -} - -var xxx_messageInfo_Permission proto.InternalMessageInfo +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } // Role is a single entry in the bucket authRoles type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` } -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{2} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Role.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{3} } func init() { - proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) + 
proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") proto.RegisterType((*User)(nil), "authpb.User") proto.RegisterType((*Permission)(nil), "authpb.Permission") proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } - -var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, - 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, - 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, - 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, - 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, - 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, - 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, - 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, - 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, - 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, - 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, - 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, - 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, - 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, - 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, - 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, - 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, +func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoPassword { + dAtA[i] = 0x8 + i++ + if m.NoPassword { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } func (m *User) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -222,49 +153,54 @@ func (m *User) Marshal() (dAtA []byte, err error) { } func (m *User) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, 
uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Options != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - return len(dAtA) - i, nil + return i, nil } func (m *Permission) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -272,45 +208,34 @@ func (m *Permission) Marshal() (dAtA []byte, err error) { } func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.PermType != 0 { - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - i-- - dAtA[i] = 0x8 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -318,58 +243,50 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.KeyPermission) > 0 { - for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuth(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.KeyPermission { dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - offset -= sovAuth(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } -func (m *User) Size() (n int) { - if m == nil { - return 0 +func (m *UserAddOptions) Size() (n int) { + var l int + _ = l + if m.NoPassword { + n += 2 } + return n +} + +func (m *User) Size() (n int) { var l int _ = l l = len(m.Name) @@ -386,16 +303,14 @@ func (m *User) Size() (n int) { n += 1 + l + sovAuth(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovAuth(uint64(l)) } return n } func (m *Permission) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.PermType != 0 { @@ -409,16 +324,10 @@ func (m *Permission) Size() (n int) { if l > 0 { n += 1 + l + sovAuth(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Role) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -431,18 +340,92 @@ func (m *Role) Size() (n int) { n += 1 + l + sovAuth(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovAuth(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozAuth(x uint64) (n int) { return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPassword = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *User) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -458,7 +441,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -486,7 +469,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -495,9 +478,6 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -520,7 +500,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -529,9 +509,6 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -554,7 +531,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -564,13 +541,43 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthAuth } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + if m.Options == nil { + m.Options = &UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -581,13 +588,9 @@ func (m *User) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -612,7 +615,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -640,7 +643,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PermType |= Permission_Type(b&0x7F) << shift + m.PermType |= (Permission_Type(b) & 0x7F) << shift if b < 0x80 { break } @@ -659,7 +662,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -668,9 +671,6 @@ func (m *Permission) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -693,7 +693,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -702,9 +702,6 @@ func (m *Permission) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -722,13 +719,9 @@ func (m *Permission) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -753,7 +746,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -781,7 +774,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -790,9 +783,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -815,7 +805,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -824,9 +814,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -844,13 +831,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -914,11 +897,8 @@ func skipAuth(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthAuth - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthAuth } return iNdEx, nil @@ -949,9 +929,6 @@ func skipAuth(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthAuth - } } return iNdEx, nil case 4: @@ -970,3 +947,31 @@ var ( ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.proto similarity index 89% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.proto rename to cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.proto index 001d3343548..8f82b7cf1e4 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/auth/authpb/auth.proto +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/auth/authpb/auth.proto @@ -9,11 +9,16 @@ option (gogoproto.unmarshaler_all) = true; option (gogoproto.goproto_getters_all) = false; option (gogoproto.goproto_enum_prefix_all) = false; +message UserAddOptions { 
+ bool no_password = 1; +}; + // User is a single entry in the bucket authUsers message User { bytes name = 1; bytes password = 2; repeated string roles = 3; + UserAddOptions options = 4; } // Permission is a single entity diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/README.md b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/README.md new file mode 100644 index 00000000000..6c6fe7c67c4 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get go.etcd.io/etcd/clientv3 +``` + +## Get started + +Create client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, +pass `context.WithTimeout` to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). + +## Error Handling + +etcd client returns 2 types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). + +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Request size limit + +Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). 
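As a quick illustration of the Namespacing section in the README added above, here is a minimal sketch of wrapping a client with the `namespace` package; the endpoint address and the "my-app/" prefix are assumed placeholder values, not taken from this patch:

```go
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/namespace"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Wrap KV, Watcher, and Lease so every request made through cli is
	// transparently confined to the "my-app/" prefix, as the Namespacing
	// section describes.
	cli.KV = namespace.NewKV(cli.KV, "my-app/")
	cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-app/")
	cli.Lease = namespace.NewLease(cli.Lease, "my-app/")
}
```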
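Likewise, for the request size limit paragraph, a sketch that sets the `MaxCallSendMsgSize` and `MaxCallRecvMsgSize` fields it names; the 8 MiB send limit is an assumed example value:

```go
package main

import (
	"context"
	"log"
	"math"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 8 * 1024 * 1024, // example: allow 8 MiB requests instead of the 2 MiB default
		MaxCallRecvMsgSize: math.MaxInt32,   // matches the documented receive default
	})
	if err != nil {
		log.Fatal(err)
	}
	// Always close the client; as the README notes, an unclosed client leaks goroutines.
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	_, err = cli.Put(ctx, "sample_key", "sample_value")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
```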
diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/auth.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/auth.go similarity index 74% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/auth.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/auth.go index edccf1a8caf..c954f1bf474 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/auth.go @@ -19,9 +19,8 @@ import ( "fmt" "strings" - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - + "go.etcd.io/etcd/auth/authpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) @@ -53,6 +52,8 @@ const ( PermReadWrite = authpb.READWRITE ) +type UserAddOptions authpb.UserAddOptions + type Auth interface { // AuthEnable enables auth of an etcd cluster. AuthEnable(ctx context.Context) (*AuthEnableResponse, error) @@ -63,6 +64,9 @@ type Auth interface { // UserAdd adds a new user to an etcd cluster. UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + // UserAddWithOptions adds a new user to an etcd cluster with some options. + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + // UserDelete deletes a user from an etcd cluster. UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) @@ -100,70 +104,75 @@ type Auth interface { RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) } -type auth struct { +type authClient struct { remote pb.AuthClient callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - api := &auth{remote: RetryAuthClient(c)} + api := &authClient{remote: RetryAuthClient(c)} if c != nil { api.callOpts = c.callOpts } return api } -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) return (*AuthEnableResponse)(resp), toErr(ctx, err) } -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) return (*AuthDisableResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) 
return (*AuthUserAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { perm := &authpb.Permission{ Key: []byte(key), RangeEnd: []byte(rangeEnd), @@ -173,22 +182,22 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go index 9306385e96c..d02a7eec7c3 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3/balancer/connectivity" - "github.com/coreos/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/connectivity" + "go.etcd.io/etcd/clientv3/balancer/picker" "go.uber.org/zap" "google.golang.org/grpc/balancer" diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go similarity index 93% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go index 1f32039e37b..864b5df6426 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go +++ 
b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go @@ -16,7 +16,9 @@ package endpoint import ( + "context" "fmt" + "net" "net/url" "strings" "sync" @@ -228,13 +230,18 @@ func ParseTarget(target string) (string, string, error) { return parts[0], parts[1], nil } -// ParseHostPort splits a ":" string into the host and port parts. -// The port part is optional. -func ParseHostPort(hostPort string) (host string, port string) { - parts := strings.SplitN(hostPort, ":", 2) - host = parts[0] - if len(parts) > 1 { - port = parts[1] +// Dialer dials a endpoint using net.Dialer. +// Context cancelation and timeout are supported. +func Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + proto, host, _ := ParseEndpoint(dialEp) + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + dialer := &net.Dialer{} + if deadline, ok := ctx.Deadline(); ok { + dialer.Deadline = deadline } - return host, port + return dialer.DialContext(ctx, proto, host) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/client.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/client.go similarity index 92% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/client.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/client.go index 64ac2da1502..215e0547980 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/client.go @@ -26,12 +26,12 @@ import ( "time" "github.com/google/uuid" - "github.com/coreos/etcd/clientv3/balancer" - "github.com/coreos/etcd/clientv3/balancer/picker" - "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint" - "github.com/coreos/etcd/clientv3/credentials" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/pkg/logutil" + "go.etcd.io/etcd/clientv3/balancer" + "go.etcd.io/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/clientv3/credentials" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/logutil" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -129,8 +129,12 @@ func NewFromURLs(urls []string) (*Client, error) { // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() - c.Watcher.Close() - c.Lease.Close() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } if c.resolverGroup != nil { c.resolverGroup.Close() } @@ -226,24 +230,17 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts } opts = append(opts, dopts...) - // Provide a net dialer that supports cancelation and timeout. 
- f := func(dialEp string, t time.Duration) (net.Conn, error) { - proto, host, _ := endpoint.ParseEndpoint(dialEp) - select { - case <-c.ctx.Done(): - return nil, c.ctx.Err() - default: - } - dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) - } - opts = append(opts, grpc.WithDialer(f)) - + dialer := endpoint.Dialer if creds != nil { opts = append(opts, grpc.WithTransportCredentials(creds)) + // gRPC load balancer workaround. See credentials.transportCredential for details. + if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok { + dialer = credsDialer.Dialer + } } else { opts = append(opts, grpc.WithInsecure()) } + opts = append(opts, grpc.WithContextDialer(dialer)) // Interceptor retry and backoff. // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with @@ -262,7 +259,10 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts // Dial connects to a single endpoint using the client's config. func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { - creds := c.directDialCreds(ep) + creds, err := c.directDialCreds(ep) + if err != nil { + return nil, err + } // Use the grpc passthrough resolver to directly dial a single endpoint. // This resolver passes through the 'unix' and 'unixs' endpoints schemes used // by etcd without modification, allowing us to directly dial endpoints and @@ -365,8 +365,8 @@ func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, return conn, nil } -func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials { - _, hostPort, scheme := endpoint.ParseEndpoint(ep) +func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) { + _, host, scheme := endpoint.ParseEndpoint(ep) creds := c.creds if len(scheme) != 0 { creds = c.processCreds(scheme) @@ -375,12 +375,17 @@ func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials // Set the server name must to the endpoint hostname without port since grpc // otherwise attempts to check if x509 cert is valid for the full endpoint // including the scheme and port, which fails. - host, _ := endpoint.ParseHostPort(hostPort) - clone.OverrideServerName(host) + overrideServerName, _, err := net.SplitHostPort(host) + if err != nil { + // Either the host didn't have a port or the host could not be parsed. Either way, continue with the + // original host string. + overrideServerName = host + } + clone.OverrideServerName(overrideServerName) creds = clone } } - return creds + return creds, nil } func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials { @@ -659,3 +664,9 @@ func IsConnCanceled(err error) bool { // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' return strings.Contains(err.Error(), "grpc: the client connection is closing") } + +// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details. 
+type TransportCredentialsWithDialer interface { + grpccredentials.TransportCredentials + Dialer(ctx context.Context, dialEp string) (net.Conn, error) +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/cluster.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/cluster.go similarity index 69% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/cluster.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/cluster.go index 785672be8ca..ce97e5c85b8 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/cluster.go @@ -17,18 +17,19 @@ package clientv3 import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" "google.golang.org/grpc" ) type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse ) type Cluster interface { @@ -38,11 +39,17 @@ type Cluster interface { // MemberAdd adds a new member into the cluster. MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) // MemberUpdate updates the peer addresses of the member. MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) } type cluster struct { @@ -67,12 +74,23 @@ func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { } func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { // fail-fast before panic in rafthttp if _, err := types.NewURLs(peerAddrs); err != nil { return nil, err } - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) if err != nil { return nil, toErr(ctx, err) @@ -112,3 +130,12 @@ func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { } return nil, toErr(ctx, err) } + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compact_op.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compact_op.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compact_op.go index 41e80c1da5d..5779713d3dd 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compact_op.go @@ -15,7 +15,7 @@ package clientv3 import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) // CompactOp represents a compact operation. diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compare.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compare.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compare.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compare.go index b5f0a255279..01ed68e942a 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/compare.go @@ -15,7 +15,7 @@ package clientv3 import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) type CompareTarget int diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/config.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/config.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/config.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/config.go index 9c17fc25223..11d447d5756 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/config.go @@ -72,15 +72,17 @@ type Config struct { // Without this, Dial returns immediately and connecting the server happens in background. DialOptions []grpc.DialOption + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + // LogConfig configures client-side logger. // If nil, use the default logger. // TODO: configure gRPC logger LogConfig *zap.Config - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. - Context context.Context - // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). 
PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go similarity index 67% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go index e5a55667fe6..63389c08bff 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go @@ -22,7 +22,8 @@ import ( "net" "sync" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" grpccredentials "google.golang.org/grpc/credentials" ) @@ -65,38 +66,37 @@ func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { } // transportCredential implements "grpccredentials.TransportCredentials" interface. +// transportCredential wraps TransportCredentials to track which +// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the +// hostname or IP of the dialed endpoint. +// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when +// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name +// in their TLS certs. +// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer) +// when dialing. type transportCredential struct { gtc grpccredentials.TransportCredentials + mu sync.Mutex + // addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the + // endpoint provided to the dialer when dialing + addrToEndpoint map[string]string } func newTransportCredential(cfg *tls.Config) *transportCredential { return &transportCredential{ - gtc: grpccredentials.NewTLS(cfg), + gtc: grpccredentials.NewTLS(cfg), + addrToEndpoint: map[string]string{}, } } func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { - // Only overwrite when authority is an IP address! - // Let's say, a server runs SRV records on "etcd.local" that resolves - // to "m1.etcd.local", and its SAN field also includes "m1.etcd.local". - // But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)? - // Then, the server should only authenticate using its DNS hostname "m1.etcd.local", - // instead of overwriting it with its IP address. - // And we do not overwrite "localhost" either. Only overwrite IP addresses! - if isIP(authority) { - target := rawConn.RemoteAddr().String() - if authority != target { - // When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget" - // update only happens once. This is problematic, because when TLS is enabled, - // retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from - // the initial dial call. - // If the server authenticates by IP addresses, we want to set a new endpoint as - // a new authority. 
Otherwise - // "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156" - // when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180" - // but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156" - authority = target - } + // Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint + tc.mu.Lock() + dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()] + tc.mu.Unlock() + if ok { + _, host, _ := endpoint.ParseEndpoint(dialEp) + authority = host } return tc.gtc.ClientHandshake(ctx, authority, rawConn) } @@ -115,8 +115,15 @@ func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { } func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { + copy := map[string]string{} + tc.mu.Lock() + for k, v := range tc.addrToEndpoint { + copy[k] = v + } + tc.mu.Unlock() return &transportCredential{ - gtc: tc.gtc.Clone(), + gtc: tc.gtc.Clone(), + addrToEndpoint: copy, } } @@ -124,6 +131,17 @@ func (tc *transportCredential) OverrideServerName(serverNameOverride string) err return tc.gtc.OverrideServerName(serverNameOverride) } +func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + // Keep track of which addresses are dialed for which endpoints + conn, err := endpoint.Dialer(ctx, dialEp) + if conn != nil { + tc.mu.Lock() + tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp + tc.mu.Unlock() + } + return conn, err +} + // perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. type perRPCCredential struct { authToken string diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/doc.go similarity index 82% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/doc.go index 717fbe435ea..913cd28255b 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/doc.go @@ -19,7 +19,7 @@ // // expect dial time-out on ipv4 blackhole // _, err := clientv3.New(clientv3.Config{ // Endpoints: []string{"http://254.0.0.1:12345"}, -// DialTimeout: 2 * time.Second +// DialTimeout: 2 * time.Second, // }) // // // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 @@ -61,7 +61,7 @@ // // 1. context error: canceled or deadline exceeded. // 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. -// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// 3. 
gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go // // Here is the example code to handle client errors: // @@ -71,14 +71,14 @@ // // ctx is canceled by another routine // } else if err == context.DeadlineExceeded { // // ctx is attached with a deadline and it exceeded +// } else if err == rpctypes.ErrEmptyKey { +// // client-side error: key is not provided // } else if ev, ok := status.FromError(err); ok { // code := ev.Code() // if code == codes.DeadlineExceeded { // // server-side context might have timed-out first (due to clock skew) // // while original client-side context is not timed-out yet // } -// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { -// // process (verr.Errors) // } else { // // bad cluster endpoints, which are not etcd servers // } @@ -87,11 +87,20 @@ // go func() { cli.Close() }() // _, err := kvc.Get(ctx, "a") // if err != nil { +// // with etcd clientv3 <= v3.3 // if err == context.Canceled { // // grpc balancer calls 'Get' with an inflight client.Close -// } else if err == grpc.ErrClientConnClosing { +// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x // // grpc balancer calls 'Get' after client.Close. // } +// // with etcd clientv3 >= v3.4 +// if clientv3.IsConnCanceled(err) { +// // gRPC client connection is closed +// } // } // +// The grpc load balancer is registered statically and is shared across etcd clients. +// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment +// variable. E.g. "ETCD_CLIENT_DEBUG=1". +// package clientv3 diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/kv.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/kv.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/kv.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/kv.go index 5a7469bd4c9..2b7864ad8b0 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/kv.go @@ -17,7 +17,7 @@ package clientv3 import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/lease.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/lease.go similarity index 92% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/lease.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/lease.go index 3729cf37be0..c2796fc969a 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/lease.go @@ -19,9 +19,10 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) @@ -117,22 +118,21 @@ type Lease interface { // Leases retrieves all leases. Leases(ctx context.Context) (*LeaseLeasesResponse, error) - // KeepAlive keeps the given lease alive forever. If the keepalive response - // posted to the channel is not consumed immediately, the lease client will - // continue sending keep alive requests to the etcd server at least every - // second until latest response is consumed. + // KeepAlive attempts to keep the given lease alive forever. 
If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. // // The returned "LeaseKeepAliveResponse" channel closes if underlying keep // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse" - // from this closed channel is nil. - // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: - // no leader") or canceled by the caller (e.g. context.Canceled), the error - // is returned. Otherwise, it retries. + // given context "ctx" is canceled or timed out. // // TODO(v4.0): post errors to last keep alive message before closing - // (see https://github.com/coreos/etcd/pull/7866) + // (see https://github.com/etcd-io/etcd/pull/7866) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) // KeepAliveOnce renews the lease once. The response corresponds to the @@ -172,6 +172,8 @@ type lessor struct { firstKeepAliveOnce sync.Once callOpts []grpc.CallOption + + lg *zap.Logger } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -196,6 +198,7 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout keepAlives: make(map[LeaseID]*keepAlive), remote: remote, firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL @@ -291,7 +294,7 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl } l.mu.Unlock() - go l.keepAliveCtxCloser(id, ctx, ka.donec) + go l.keepAliveCtxCloser(ctx, id, ka.donec) l.firstKeepAliveOnce.Do(func() { go l.recvKeepAliveLoop() go l.deadlineLoop() @@ -323,7 +326,7 @@ func (l *lessor) Close() error { return nil } -func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) { +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { select { case <-donec: return @@ -459,7 +462,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { select { case <-time.After(retryConnWait): - continue case <-l.stopCtx.Done(): return l.stopCtx.Err() } @@ -469,7 +471,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { // resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
if err != nil { cancel() return nil, err @@ -518,6 +520,12 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { select { case ch <- karesp: default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } } // still advance in order to rate-limit keep-alive sends ka.nextKeepAlive = nextKeepAlive @@ -569,7 +577,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { } select { - case <-time.After(500 * time.Millisecond): + case <-time.After(retryConnWait): case <-stream.Context().Done(): return case <-l.donec: diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/logger.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/logger.go index 3276372ad38..f5ae0109dad 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/logger.go @@ -18,7 +18,7 @@ import ( "io/ioutil" "sync" - "github.com/coreos/etcd/pkg/logutil" + "go.etcd.io/etcd/pkg/logutil" "google.golang.org/grpc/grpclog" ) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/maintenance.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/maintenance.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/maintenance.go index 6db6c0e96a3..744455a3b36 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/maintenance.go @@ -19,7 +19,7 @@ import ( "fmt" "io" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/op.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/op.go similarity index 90% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/op.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/op.go index 3dca41b5faa..81ae31fd8f3 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/op.go @@ -14,7 +14,7 @@ package clientv3 -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" type opType int @@ -26,9 +26,7 @@ const ( tTxn ) -var ( - noPrefixEnd = []byte{0} -) +var noPrefixEnd = []byte{0} // Op represents an Operation that kv can execute. type Op struct { @@ -83,8 +81,15 @@ type Op struct { // accessors / mutators -func (op Op) IsTxn() bool { return op.t == tTxn } -func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } +// IsTxn returns true if the "Op" type is transaction. +func (op Op) IsTxn() bool { + return op.t == tTxn +} + +// Txn returns the comparison(if) operations, "then" operations, and "else" operations. +func (op Op) Txn() ([]Cmp, []Op, []Op) { + return op.cmps, op.thenOps, op.elseOps +} // KeyBytes returns the byte slice holding the Op's key. 
func (op Op) KeyBytes() []byte { return op.key } @@ -108,13 +113,13 @@ func (op Op) IsGet() bool { return op.t == tRange } func (op Op) IsDelete() bool { return op.t == tDeleteRange } // IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } +func (op Op) IsSerializable() bool { return op.serializable } // IsKeysOnly returns whether keysOnly is set. -func (op Op) IsKeysOnly() bool { return op.keysOnly == true } +func (op Op) IsKeysOnly() bool { return op.keysOnly } // IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } +func (op Op) IsCountOnly() bool { return op.countOnly } // MinModRev returns the operation's minimum modify revision. func (op Op) MinModRev() int64 { return op.minModRev } @@ -211,13 +216,23 @@ func (op Op) isWrite() bool { return op.t != tRange } +// OpGet returns "get" operation based on given key and operation options. func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) return ret } +// OpDelete returns "delete" operation based on given key and operation options. func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } ret := Op{t: tDeleteRange, key: []byte(key)} ret.applyOpts(opts) switch { @@ -245,6 +260,7 @@ func OpDelete(key string, opts ...OpOption) Op { return ret } +// OpPut returns "put" operation based on given key-value and operation options. func OpPut(key, val string, opts ...OpOption) Op { ret := Op{t: tPut, key: []byte(key), val: []byte(val)} ret.applyOpts(opts) @@ -273,6 +289,7 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +// OpTxn returns "txn" operation based on given transaction conditions. func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} } @@ -383,7 +400,14 @@ func WithRange(endKey string) OpOption { // WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests // to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + } +} // WithSerializable makes 'Get' request serializable. By default, // it's linearizable. Serializable requests are better for lower latency @@ -472,6 +496,17 @@ func WithPrevKV() OpOption { } } +// WithFragment to receive raw watch response with fragmentation. +// Fragmentation is disabled by default. If fragmentation is enabled, +// etcd watch server will split watch response before sending to clients +// when the total size of watch events exceed server-side request limit. +// The default server-side request limit is 1.5 MiB, which can be configured +// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. +// See "etcdserver/api/v3rpc/watch.go" for more details. +func WithFragment() OpOption { + return func(op *Op) { op.fragment = true } +} + // WithIgnoreValue updates the key using its current value. // This option can not be combined with non-empty values. 
// Returns an error if the key does not exist. @@ -518,13 +553,8 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} } -// WithFragment to receive raw watch response with fragmentation. -// Fragmentation is disabled by default. If fragmentation is enabled, -// etcd watch server will split watch response before sending to clients -// when the total size of watch events exceed server-side request limit. -// The default server-side request limit is 1.5 MiB, which can be configured -// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. -// See "etcdserver/api/v3rpc/watch.go" for more details. -func WithFragment() OpOption { - return func(op *Op) { op.fragment = true } -} +// isWithPrefix returns true if WithPrefix is being called in the op +func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) } + +// isWithFromKey returns true if WithFromKey is being called in the op +func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/options.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/options.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/options.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/options.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry.go similarity index 97% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry.go index 6baa52e14a5..7e855de066a 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry.go @@ -17,8 +17,8 @@ package clientv3 import ( "context" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -179,6 +179,10 @@ func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUp return rcc.cc.MemberUpdate(ctx, in, opts...) } +func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) { + return rcc.cc.MemberPromote(ctx, in, opts...) 
+} + type retryMaintenanceClient struct { mc pb.MaintenanceClient } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go index c65ec81a431..080490ae292 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/sort.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/sort.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/sort.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/sort.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/txn.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/txn.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/txn.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/txn.go index c3c2d248569..c19715da438 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/txn.go @@ -18,7 +18,7 @@ import ( "context" "sync" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/utils.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/utils.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/utils.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/utils.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/watch.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/watch.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/watch.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/watch.go index 4a3b8cccda0..87d222d1d68 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/clientv3/watch.go @@ -21,9 +21,9 @@ import ( "sync" "time" - v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go 
b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go similarity index 88% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go index bc1ad7bbd3d..e6a281460d5 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -40,6 +40,9 @@ var ( ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() @@ -51,6 +54,7 @@ var ( ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() @@ -69,6 +73,8 @@ var ( ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() errStringToError = map[string]error{ ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, @@ -91,6 +97,9 @@ var ( ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, @@ -102,6 +111,7 @@ var ( ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, 
ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, @@ -112,6 +122,7 @@ var ( ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, @@ -119,6 +130,8 @@ var ( ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, } ) @@ -143,6 +156,9 @@ var ( ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) + ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) + ErrTooManyLearners = Error(ErrGRPCTooManyLearners) ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) @@ -154,6 +170,7 @@ var ( ErrUserNotFound = Error(ErrGRPCUserNotFound) ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrRoleEmpty = Error(ErrGRPCRoleEmpty) ErrAuthFailed = Error(ErrGRPCAuthFailed) ErrPermissionDenied = Error(ErrGRPCPermissionDenied) ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) @@ -172,6 +189,7 @@ var ( ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) ErrUnhealthy = Error(ErrGRPCUnhealthy) ErrCorrupt = Error(ErrGRPCCorrupt) + ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) ) // EtcdError defines gRPC server errors. diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go similarity index 75% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index 12b6763977f..9e9b42ceac7 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1,16 +1,125 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: etcdserver.proto +/* + Package etcdserverpb is a generated protocol buffer package. + + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashKVRequest + HashKVResponse + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchProgressRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseCheckpoint + LeaseCheckpointRequest + LeaseCheckpointResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + LeaseLeasesRequest + LeaseStatus + LeaseLeasesResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + MemberPromoteRequest + MemberPromoteResponse + DefragmentRequest + DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ package etcdserverpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -25,141 +134,50 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(m, src) -} -func (m *Request) XXX_Size() int { - return m.Size() -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Request proto.InternalMessageInfo +func (m 
*Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Metadata proto.InternalMessageInfo +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } func init() { proto.RegisterType((*Request)(nil), "etcdserverpb.Request") proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") } - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } - -var fileDescriptor_09ffbeb3bebbce7e = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 
0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} - func (m *Request) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -167,133 +185,123 @@ func (m *Request) Marshal() (dAtA []byte, err error) { } func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Refresh != nil { - i-- - if *m.Refresh { + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 + i++ } - i-- - if m.Stream { + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - i-- - dAtA[i] = 0x78 - i-- - if m.Quorum { + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ + if m.Recursive { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x70 - i-- + i++ + dAtA[i] = 0x68 + i++ if m.Sorted { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x68 - i-- - if m.Recursive { + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x60 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - i-- - dAtA[i] = 0x58 - i-- - if m.Wait { + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { dAtA[i] = 1 } else { dAtA[i] = 0 } 
- i-- - dAtA[i] = 0x50 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - i-- - dAtA[i] = 0x48 - if m.PrevExist != nil { - i-- - if *m.PrevExist { + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 + i++ } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - i-- - dAtA[i] = 0x38 - i -= len(m.PrevValue) - copy(dAtA[i:], m.PrevValue) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i-- - dAtA[i] = 0x32 - i-- - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0x28 - i -= len(m.Val) - copy(dAtA[i:], m.Val) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i-- - dAtA[i] = 0x22 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x1a - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x12 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *Metadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -301,43 +309,32 @@ func (m *Metadata) Marshal() (dAtA []byte, err error) { } func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - i-- - dAtA[i] = 0x10 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - offset -= sovEtcdserver(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Request) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovEtcdserver(uint64(m.ID)) @@ -372,9 +369,6 @@ func (m *Request) Size() (n int) { } func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovEtcdserver(uint64(m.NodeID)) @@ -386,7 +380,14 @@ func (m *Metadata) Size() (n int) { } func sovEtcdserver(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozEtcdserver(x uint64) (n int) { return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -406,7 +407,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -434,7 +435,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -453,7 +454,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -463,9 +464,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -485,7 +483,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -495,9 +493,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -517,7 +512,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -527,9 +522,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -549,7 +541,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -569,7 +561,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -579,9 +571,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -601,7 +590,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PrevIndex |= uint64(b&0x7F) << shift + m.PrevIndex |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -620,7 +609,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -641,7 +630,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Expiration |= int64(b&0x7F) << shift + m.Expiration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -660,7 +649,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -680,7 +669,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Since |= uint64(b&0x7F) << shift + m.Since |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -699,7 +688,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -719,7 +708,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -739,7 +728,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -759,7 +748,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Time |= int64(b&0x7F) << shift + m.Time |= (int64(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -778,7 +767,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -798,7 +787,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -814,9 +803,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthEtcdserver } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthEtcdserver - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -845,7 +831,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -873,7 +859,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodeID |= uint64(b&0x7F) << shift + m.NodeID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -892,7 +878,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClusterID |= uint64(b&0x7F) << shift + m.ClusterID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -906,9 +892,6 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthEtcdserver } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthEtcdserver - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -976,11 +959,8 @@ func skipEtcdserver(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthEtcdserver } return iNdEx, nil @@ -1011,9 +991,6 @@ func skipEtcdserver(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthEtcdserver - } } return iNdEx, nil case 4: @@ -1032,3 +1009,33 @@ var ( ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 
0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go similarity index 63% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index b3a199e9c7b..b170499e4b6 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -4,13 +4,15 @@ package etcdserverpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -18,162 +20,64 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type RequestHeader struct { ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // username is a username that is associated with an auth token of gRPC connection Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{0} -} -func (m *RequestHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestHeader.Merge(m, src) -} -func (m *RequestHeader) XXX_Size() int { - return m.Size() -} -func (m *RequestHeader) XXX_DiscardUnknown() { - xxx_messageInfo_RequestHeader.DiscardUnknown(m) + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` } -var xxx_messageInfo_RequestHeader proto.InternalMessageInfo +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } // An InternalRaftRequest is the union of all requests which can be // sent via raft. type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest 
`protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` - AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func (*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{1} -} -func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalRaftRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalRaftRequest.Merge(m, src) -} -func (m *InternalRaftRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalRaftRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m) + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + 
LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` } -var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } type EmptyResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func (*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_b4c9a9be0cfca103, []int{2} -} -func (m *EmptyResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EmptyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EmptyResponse.Merge(m, src) -} -func (m *EmptyResponse) XXX_Size() int { - return m.Size() -} -func (m *EmptyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EmptyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } // What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? // InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. @@ -182,115 +86,26 @@ type InternalAuthenticateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` } func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } func (*InternalAuthenticateRequest) ProtoMessage() {} func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{3} -} -func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src) -} -func (m *InternalAuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m) + return fileDescriptorRaftInternal, []int{3} } -var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo - func init() { proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") } - -func init() { 
proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } - -var fileDescriptor_b4c9a9be0cfca103 = []byte{ - // 840 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48, - 0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd, - 0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52, - 0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e, - 0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6, - 0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4, - 0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08, - 0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86, - 0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd, - 0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3, - 0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4, - 0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d, - 0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee, - 0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63, - 0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42, - 0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34, - 0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f, - 0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08, - 0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5, - 0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99, - 0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d, - 0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c, - 0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63, - 0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44, - 0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63, - 0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02, - 0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22, - 0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b, - 0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b, - 0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b, - 0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9, - 0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5, - 0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88, - 0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c, - 0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 
0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd, - 0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17, - 0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d, - 0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60, - 0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd, - 0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25, - 0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a, - 0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29, - 0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba, - 0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54, - 0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1, - 0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1, - 0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c, - 0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f, - 0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74, - 0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74, - 0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15, - 0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff, - 0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00, -} - func (m *RequestHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -298,43 +113,33 @@ func (m *RequestHeader) Marshal() (dAtA []byte, err error) { } func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.AuthRevision != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - i-- - dAtA[i] = 0x18 + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) } if len(m.Username) > 0 { - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) } - if m.ID != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -342,377 +147,326 @@ func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { } func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { 
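The Marshal hunks in this file switch between two equivalent layouts of the generated encoder: the removed lines fill a pre-sized buffer backwards with MarshalToSizedBuffer, decrementing the index, while the restored lines walk a single write position forward and emit fields in ascending field-number order. A minimal hand-written sketch of the forward style for a toy two-field message follows; it is illustrative only, not the generated code, and putUvarint and toyHeader are made-up names.

package main

import "fmt"

// putUvarint appends v in base-128 varint form (7 bits per byte, MSB set
// on every byte except the last) and returns the extended slice.
func putUvarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(dst, byte(v))
}

// toyHeader mirrors the shape of RequestHeader: a varint ID (field 1) and
// a length-delimited Username (field 2). The type is illustrative only.
type toyHeader struct {
	ID       uint64
	Username string
}

// marshalForward emits fields in ascending field-number order while moving
// a single write position forward, which is the style these "+" lines restore.
func (h toyHeader) marshalForward() []byte {
	var out []byte
	if h.ID != 0 {
		out = append(out, 0x08) // key: field 1, wire type 0 (varint)
		out = putUvarint(out, h.ID)
	}
	if len(h.Username) > 0 {
		out = append(out, 0x12) // key: field 2, wire type 2 (length-delimited)
		out = putUvarint(out, uint64(len(h.Username)))
		out = append(out, h.Username...)
	}
	return out
}

func main() {
	fmt.Printf("% x\n", toyHeader{ID: 7, Username: "root"}.marshalForward())
	// Output: 08 07 12 04 72 6f 6f 74
}

Writing from the end of a pre-sized buffer saves the newer generator a second size computation for nested messages, but both layouts are intended to produce the same wire bytes.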
- size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) } - if m.AuthRoleRevokePermission != nil { - { - size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0xa2 + i += n1 } - if m.AuthRoleGrantPermission != nil { - { - size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x9a + i += n2 } - if m.AuthRoleGet != nil { - { - size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x92 + i += n3 } - if m.AuthRoleDelete != nil { - { - size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x8a + i += n4 } - if m.AuthRoleAdd != nil { - { - size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x82 + i += n5 } - if m.AuthRoleList != nil { - { - size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x9a + i += n6 } - if m.AuthUserList != nil { - { - size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x92 + i 
+= n7 } - if m.AuthUserRevokeRole != nil { - { - size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x8a + i += n8 } - if m.AuthUserGrantRole != nil { - { - size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x82 + i += n9 } - if m.AuthUserChangePassword != nil { - { - size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseCheckpoint != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseCheckpoint.Size())) + n10, err := m.LeaseCheckpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xfa + i += n10 } - if m.AuthUserGet != nil { - { - size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n11, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xf2 + i += n11 } - if m.AuthUserDelete != nil { - { - size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n12, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xea + i += n12 } - if m.AuthUserAdd != nil { - { - size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n13, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xe2 + i += n13 } if m.Authenticate != nil { - { - size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3f - i-- dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n14, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } - if m.AuthDisable != nil { - { - size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + 
dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n15, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3f - i-- - dAtA[i] = 0x9a + i += n15 } - if m.AuthEnable != nil { - { - size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) + n16, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 + i += n16 } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n17, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 + i += n17 } - if m.Alarm != nil { - { - size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n18, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x52 + i += n18 } - if m.LeaseRevoke != nil { - { - size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n19, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4a + i += n19 } - if m.LeaseGrant != nil { - { - size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n20, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x42 + i += n20 } - if m.Compaction != nil { - { - size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n21, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i += n21 } - if m.Txn != nil { - { - size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n22, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 + i += n22 } - if 
m.DeleteRange != nil { - { - size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n23, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n23 } - if m.Put != nil { - { - size, err := m.Put.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n24, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n24 } - if m.Range != nil { - { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n25, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n25 } - if m.V2 != nil { - { - size, err := m.V2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n26, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n26 } - if m.ID != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n27, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 } - return len(dAtA) - i, nil + return i, nil } func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -720,26 +474,17 @@ func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { } func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -747,58 +492,41 @@ func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { } func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int 
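Several tag bytes in these hunks are two bytes long because InternalRaftRequest reserves large field numbers for the auth requests. The key of a length-delimited field is (fieldNumber << 3) | 2 encoded as a varint, which is why header (field 100) appears as 0xa2 0x06, auth_enable (field 1000) as 0xc2 0x3e, and auth_role_add (field 1200) as 0x82 0x4b. A short check of that arithmetic; fieldKey is an illustrative helper, not part of the generated file.

package main

import "fmt"

// fieldKey builds the protobuf key for a field, (number << 3) | wireType,
// and encodes it as a little-endian base-128 varint.
func fieldKey(fieldNumber int, wireType byte) []byte {
	v := uint64(fieldNumber)<<3 | uint64(wireType)
	var key []byte
	for v >= 1<<7 {
		key = append(key, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(key, byte(v))
}

func main() {
	// Wire type 2 marks length-delimited fields (all the embedded requests).
	fmt.Printf("header        (100):  % x\n", fieldKey(100, 2))  // a2 06
	fmt.Printf("auth_enable   (1000): % x\n", fieldKey(1000, 2)) // c2 3e
	fmt.Printf("auth_role_add (1200): % x\n", fieldKey(1200, 2)) // 82 4b
}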
_ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SimpleToken) > 0 { - i -= len(m.SimpleToken) - copy(dAtA[i:], m.SimpleToken) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i-- - dAtA[i] = 0x1a + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) } - return len(dAtA) - i, nil + return i, nil } func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - offset -= sovRaftInternal(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *RequestHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -811,16 +539,10 @@ func (m *RequestHeader) Size() (n int) { if m.AuthRevision != 0 { n += 1 + sovRaftInternal(uint64(m.AuthRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *InternalRaftRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -862,6 +584,10 @@ func (m *InternalRaftRequest) Size() (n int) { l = m.Alarm.Size() n += 1 + l + sovRaftInternal(uint64(l)) } + if m.LeaseCheckpoint != nil { + l = m.LeaseCheckpoint.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } if m.Header != nil { l = m.Header.Size() n += 2 + l + sovRaftInternal(uint64(l)) @@ -930,28 +656,16 @@ func (m *InternalRaftRequest) Size() (n int) { l = m.AuthRoleRevokePermission.Size() n += 2 + l + sovRaftInternal(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *EmptyResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *InternalAuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -966,14 +680,18 @@ func (m *InternalAuthenticateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRaftInternal(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovRaftInternal(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozRaftInternal(x uint64) (n int) { return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -993,7 +711,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1021,7 +739,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1040,7 +758,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1050,9 +768,6 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1072,7 +787,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AuthRevision |= uint64(b&0x7F) << shift + m.AuthRevision |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1086,13 +801,9 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1117,7 +828,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1145,7 +856,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1164,7 +875,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1173,9 +884,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1200,7 +908,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1209,9 +917,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1236,7 +941,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1245,9 +950,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1272,7 +974,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1281,9 +983,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1308,7 +1007,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1317,9 +1016,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1344,7 +1040,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1353,9 +1049,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1380,7 +1073,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1389,9 +1082,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1416,7 +1106,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1425,9 +1115,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1452,7 +1139,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1461,9 +1148,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,9 +1158,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 100: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1488,7 +1172,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1497,9 +1181,39 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseCheckpoint == nil { + m.LeaseCheckpoint = &LeaseCheckpointRequest{} + } + if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthRaftInternal } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,7 +1238,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
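Each field branch of these Unmarshal hunks repeats the same little-endian base-128 decode loop, and the dropped postIndex < 0 lines are the newer generator's guard against a declared length overflowing a signed int before the postIndex > l bounds check runs. Below is a standalone sketch of that pattern with both checks kept; readUvarint and readBytes are illustrative names, not the generated API.

package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow  = errors.New("varint overflows a 64-bit integer")
	errTruncated = errors.New("unexpected end of buffer")
	errBadLength = errors.New("negative length from overflow")
)

// readUvarint decodes one base-128 varint starting at index i and returns
// the value plus the index just past it.
func readUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if i >= len(data) {
			return 0, 0, errTruncated
		}
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

// readBytes decodes a length-delimited payload: a varint length followed by
// that many bytes. The post < 0 test mirrors the overflow guard that the
// removed lines in this hunk drop.
func readBytes(data []byte, i int) ([]byte, int, error) {
	n, i, err := readUvarint(data, i)
	if err != nil {
		return nil, 0, err
	}
	post := i + int(n)
	if post < 0 {
		return nil, 0, errBadLength
	}
	if post > len(data) {
		return nil, 0, errTruncated
	}
	return data[i:post], post, nil
}

func main() {
	// 0x12 = field 2, wire type 2; length 3; payload "abc".
	buf := []byte{0x12, 0x03, 'a', 'b', 'c'}
	key, i, _ := readUvarint(buf, 0)
	payload, _, _ := readBytes(buf, i)
	fmt.Printf("field %d, wire type %d, payload %q\n", key>>3, key&0x7, payload)
}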
iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1533,9 +1247,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1560,7 +1271,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1569,9 +1280,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1596,7 +1304,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1605,9 +1313,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,7 +1337,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1641,9 +1346,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1668,7 +1370,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1677,9 +1379,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1704,7 +1403,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1713,9 +1412,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1740,7 +1436,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1749,9 +1445,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,7 +1469,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1785,9 +1478,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1812,7 +1502,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1821,9 +1511,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1848,7 +1535,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1857,9 +1544,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1884,7 +1568,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1893,9 +1577,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1920,7 +1601,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1929,9 +1610,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1956,7 +1634,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1965,9 +1643,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1992,7 +1667,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2001,9 +1676,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2028,7 +1700,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2037,9 +1709,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2064,7 +1733,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2073,9 +1742,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2095,13 +1761,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2126,7 +1788,7 @@ func (m *EmptyResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2149,13 +1811,9 @@ func (m *EmptyResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2180,7 +1838,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2208,7 +1866,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2218,9 +1876,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2240,7 +1895,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2250,9 +1905,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2272,7 +1924,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2282,9 +1934,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2299,13 +1948,9 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
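The default branches above compute skippy with skipRaftInternal to step over fields the decoder does not recognize; the removed XXX_unrecognized line is where the two generator versions differ on whether those bytes are retained for round-tripping or simply discarded. A rough sketch of skipping a field by wire type follows, assuming only the wire types used in this file; skipField and uvarint are illustrative names.

package main

import (
	"errors"
	"fmt"
)

// skipField returns how many bytes one complete field occupies, starting at
// the key byte of data.
func skipField(data []byte) (int, error) {
	key, i, err := uvarint(data, 0)
	if err != nil {
		return 0, err
	}
	switch wireType := key & 0x7; wireType {
	case 0: // varint
		_, i, err = uvarint(data, i)
		return i, err
	case 1: // fixed 64-bit
		return i + 8, nil
	case 2: // length-delimited
		n, i, err := uvarint(data, i)
		if err != nil {
			return 0, err
		}
		return i + int(n), nil
	case 5: // fixed 32-bit
		return i + 4, nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}

// uvarint decodes one base-128 varint starting at index i.
func uvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("truncated varint")
		}
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
	return 0, 0, errors.New("varint overflow")
}

func main() {
	// Field 3, wire type 2, length 2: key + length + payload = 4 bytes to skip.
	n, _ := skipField([]byte{0x1a, 0x02, 0xff, 0xff, 0x08, 0x01})
	fmt.Println("skip", n, "bytes") // skip 4 bytes
}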
iNdEx += skippy } } @@ -2369,11 +2014,8 @@ func skipRaftInternal(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRaftInternal } return iNdEx, nil @@ -2404,9 +2046,6 @@ func skipRaftInternal(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRaftInternal - } } return iNdEx, nil case 4: @@ -2425,3 +2064,64 @@ var ( ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xcb, 0x52, 0x33, 0x45, + 0x14, 0xc7, 0x49, 0xb8, 0xa6, 0x13, 0x2e, 0x36, 0xa0, 0x6d, 0xa8, 0x8a, 0x10, 0xbc, 0xe0, 0x0d, + 0xad, 0xf0, 0x00, 0x1a, 0x13, 0x0a, 0xa8, 0xa2, 0x90, 0x9a, 0xc2, 0x2a, 0xab, 0x5c, 0x8c, 0xcd, + 0xcc, 0x21, 0x19, 0x99, 0xcc, 0x8c, 0x3d, 0x9d, 0x88, 0x6f, 0xe2, 0x63, 0x78, 0xdb, 0xbb, 0x65, + 0xe1, 0x05, 0xf5, 0x05, 0x14, 0x37, 0xee, 0xbf, 0xef, 0x01, 0xbe, 0xea, 0xcb, 0xf4, 0x64, 0x92, + 0x0e, 0xbb, 0xc9, 0x39, 0xff, 0xf3, 0xfb, 0x9f, 0x99, 0x3e, 0x07, 0x1a, 0x6d, 0x32, 0x7a, 0xc3, + 0xdd, 0x20, 0xe2, 0xc0, 0x22, 0x1a, 0x1e, 0x26, 0x2c, 0xe6, 0x31, 0xae, 0x01, 0xf7, 0xfc, 0x14, + 0xd8, 0x08, 0x58, 0x72, 0x5d, 0xdf, 0xea, 0xc5, 0xbd, 0x58, 0x26, 0x3e, 0x10, 0x4f, 0x4a, 0x53, + 0xdf, 0xc8, 0x35, 0x3a, 0x52, 0x61, 0x89, 0xa7, 0x1e, 0x9b, 0x5f, 0xa2, 0x55, 0x07, 0xbe, 0x1e, + 0x42, 0xca, 0x4f, 0x81, 0xfa, 0xc0, 0xf0, 0x1a, 0x2a, 0x9f, 0x75, 0x49, 0x69, 0xb7, 0x74, 0xb0, + 0xe0, 0x94, 0xcf, 0xba, 0xb8, 0x8e, 0x56, 0x86, 0xa9, 0xb0, 0x1c, 0x00, 0x29, 0xef, 0x96, 0x0e, + 0x2a, 0x8e, 0xf9, 0x8d, 0xf7, 0xd1, 0x2a, 0x1d, 0xf2, 0xbe, 0xcb, 0x60, 0x14, 0xa4, 0x41, 0x1c, + 0x91, 0x79, 0x59, 0x56, 0x13, 0x41, 0x47, 0xc7, 0x9a, 0xbf, 0xac, 0xa3, 0xcd, 0x33, 0xdd, 0xb5, + 0x43, 0x6f, 0xb8, 0xb6, 0x9b, 0x32, 0x7a, 0x03, 0x95, 0x47, 0x2d, 0x69, 0x51, 0x6d, 0x6d, 0x1f, + 0x8e, 0xbf, 0xd7, 0xa1, 0x2e, 0x71, 0xca, 0xa3, 0x16, 0xfe, 0x10, 0x2d, 0x32, 0x1a, 0xf5, 0x40, + 0x7a, 0x55, 0x5b, 0xf5, 0x09, 0xa5, 0x48, 0x65, 0x72, 0x25, 0xc4, 0xef, 0xa0, 0xf9, 0x64, 0xc8, + 0xc9, 0x82, 0xd4, 0x93, 0xa2, 0xfe, 0x72, 0x98, 0xf5, 0xe3, 0x08, 0x11, 0xee, 0xa0, 0x9a, 0x0f, + 0x21, 0x70, 0x70, 0x95, 0xc9, 0xa2, 0x2c, 0xda, 0x2d, 0x16, 0x75, 0xa5, 0xa2, 0x60, 0x55, 0xf5, + 0xf3, 0x98, 0x30, 0xe4, 0x77, 0x11, 0x59, 0xb2, 0x19, 0x5e, 0xdd, 0x45, 0xc6, 0x90, 0xdf, 0x45, + 0xf8, 0x23, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x8f, 0x8b, 0xef, 0xb7, 0x2c, 0x4b, 0x5e, 0x2b, 0x96, + 0x74, 0x4c, 0x3e, 0xab, 0x1c, 0x2b, 0xc1, 0x1f, 0xa3, 0x6a, 0x08, 0x34, 0x05, 0xb7, 0xc7, 0x68, + 0xc4, 0xc9, 0x8a, 0x8d, 0x70, 0x2e, 0x04, 0x27, 0x22, 0x6f, 0x08, 0xa1, 0x09, 0x89, 0x77, 0x56, + 0x04, 0x06, 0xa3, 0xf8, 0x16, 0x48, 0xc5, 0xf6, 0xce, 0x12, 0xe1, 0x48, 0x81, 0x79, 0xe7, 0x30, + 0x8f, 0x89, 0x63, 0xa1, 0x21, 0x65, 0x03, 0x82, 0x6c, 0xc7, 0xd2, 0x16, 0x29, 0x73, 0x2c, 0x52, + 0x88, 0x3f, 0x45, 0x1b, 0xca, 0xd6, 0xeb, 0x83, 0x77, 0x9b, 0xc4, 0x41, 0xc4, 0x49, 0x55, 0x16, + 0xbf, 0x6e, 0xb1, 0xee, 0x18, 0x51, 0x86, 0x59, 0x0f, 0x8b, 0x71, 0x7c, 0x84, 0x96, 0xfa, 0x72, + 0x86, 0x89, 0x2f, 0x31, 0x3b, 0xd6, 0x21, 0x52, 0x63, 0xee, 0x68, 0x29, 0x6e, 0xa3, 0xaa, 0x1c, + 0x61, 0x88, 0xe8, 0x75, 0x08, 0xe4, 0x7f, 
0xeb, 0x09, 0xb4, 0x87, 0xbc, 0x7f, 0x2c, 0x05, 0xe6, + 0xfb, 0x51, 0x13, 0xc2, 0x5d, 0x24, 0x07, 0xde, 0xf5, 0x83, 0x54, 0x32, 0x9e, 0x2d, 0xdb, 0x3e, + 0xa0, 0x60, 0x74, 0x95, 0xc2, 0x7c, 0x40, 0x9a, 0xc7, 0xf0, 0x85, 0xa2, 0x40, 0xc4, 0x03, 0x8f, + 0x72, 0x20, 0xcf, 0x15, 0xe5, 0xed, 0x22, 0x25, 0x5b, 0xa4, 0xf6, 0x98, 0x34, 0xc3, 0x15, 0xea, + 0xf1, 0xb1, 0xde, 0x4d, 0xb1, 0xac, 0x2e, 0xf5, 0x7d, 0xf2, 0xeb, 0xca, 0xac, 0xb6, 0x3e, 0x4b, + 0x81, 0xb5, 0x7d, 0xbf, 0xd0, 0x96, 0x8e, 0xe1, 0x0b, 0xb4, 0x91, 0x63, 0xd4, 0x90, 0x93, 0xdf, + 0x14, 0x69, 0xdf, 0x4e, 0xd2, 0xdb, 0xa1, 0x61, 0x6b, 0xb4, 0x10, 0x2e, 0xb6, 0xd5, 0x03, 0x4e, + 0x7e, 0x7f, 0xb2, 0xad, 0x13, 0xe0, 0x53, 0x6d, 0x9d, 0x00, 0xc7, 0x3d, 0xf4, 0x6a, 0x8e, 0xf1, + 0xfa, 0x62, 0xed, 0xdc, 0x84, 0xa6, 0xe9, 0x37, 0x31, 0xf3, 0xc9, 0x1f, 0x0a, 0xf9, 0xae, 0x1d, + 0xd9, 0x91, 0xea, 0x4b, 0x2d, 0xce, 0xe8, 0x2f, 0x53, 0x6b, 0x1a, 0x7f, 0x8e, 0xb6, 0xc6, 0xfa, + 0x15, 0xfb, 0xe2, 0xb2, 0x38, 0x04, 0xf2, 0xa0, 0x3c, 0xde, 0x9c, 0xd1, 0xb6, 0xdc, 0xb5, 0x38, + 0x3f, 0xea, 0x97, 0xe8, 0x64, 0x06, 0x7f, 0x81, 0xb6, 0x73, 0xb2, 0x5a, 0x3d, 0x85, 0xfe, 0x53, + 0xa1, 0xdf, 0xb2, 0xa3, 0xf5, 0x0e, 0x8e, 0xb1, 0x31, 0x9d, 0x4a, 0xe1, 0x53, 0xb4, 0x96, 0xc3, + 0xc3, 0x20, 0xe5, 0xe4, 0x2f, 0x45, 0xdd, 0xb3, 0x53, 0xcf, 0x83, 0x94, 0x17, 0xe6, 0x28, 0x0b, + 0x1a, 0x92, 0x68, 0x4d, 0x91, 0xfe, 0x9e, 0x49, 0x12, 0xd6, 0x53, 0xa4, 0x2c, 0x68, 0x8e, 0x5e, + 0x92, 0xc4, 0x44, 0x7e, 0x5f, 0x99, 0x75, 0xf4, 0xa2, 0x66, 0x72, 0x22, 0x75, 0xcc, 0x4c, 0xa4, + 0xc4, 0xe8, 0x89, 0xfc, 0xa1, 0x32, 0x6b, 0x22, 0x45, 0x95, 0x65, 0x22, 0xf3, 0x70, 0xb1, 0x2d, + 0x31, 0x91, 0x3f, 0x3e, 0xd9, 0xd6, 0xe4, 0x44, 0xea, 0x18, 0xfe, 0x0a, 0xd5, 0xc7, 0x30, 0x72, + 0x50, 0x12, 0x60, 0x83, 0x20, 0x95, 0xff, 0x18, 0x7f, 0x52, 0xcc, 0xf7, 0x66, 0x30, 0x85, 0xfc, + 0xd2, 0xa8, 0x33, 0xfe, 0x2b, 0xd4, 0x9e, 0xc7, 0x03, 0xb4, 0x93, 0x7b, 0xe9, 0xd1, 0x19, 0x33, + 0xfb, 0x59, 0x99, 0xbd, 0x6f, 0x37, 0x53, 0x53, 0x32, 0xed, 0x46, 0xe8, 0x0c, 0x41, 0x73, 0x1d, + 0xad, 0x1e, 0x0f, 0x12, 0xfe, 0xad, 0x03, 0x69, 0x12, 0x47, 0x29, 0x34, 0x13, 0xb4, 0xf3, 0xc4, + 0x1f, 0x22, 0x8c, 0xd1, 0x82, 0xbc, 0x2e, 0x94, 0xe4, 0x75, 0x41, 0x3e, 0x8b, 0x6b, 0x84, 0xd9, + 0x4f, 0x7d, 0x8d, 0xc8, 0x7e, 0xe3, 0x3d, 0x54, 0x4b, 0x83, 0x41, 0x12, 0x82, 0xcb, 0xe3, 0x5b, + 0x50, 0xb7, 0x88, 0x8a, 0x53, 0x55, 0xb1, 0x2b, 0x11, 0xfa, 0x64, 0xeb, 0xfe, 0xdf, 0xc6, 0xdc, + 0xfd, 0x63, 0xa3, 0xf4, 0xf0, 0xd8, 0x28, 0xfd, 0xf3, 0xd8, 0x28, 0x7d, 0xf7, 0x5f, 0x63, 0xee, + 0x7a, 0x49, 0xde, 0x61, 0x8e, 0x5e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x36, 0xf0, 0x6f, 0x1b, + 0x09, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto similarity index 97% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto index 25d45d3c4f2..7111f4572b2 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -37,6 +37,8 @@ message InternalRaftRequest { AlarmRequest alarm = 10; + LeaseCheckpointRequest lease_checkpoint = 11; + AuthEnableRequest auth_enable = 1000; AuthDisableRequest auth_disable = 1011; @@ -71,4 +73,3 @@ message InternalAuthenticateRequest { // simple_token is 
generated in API layer (etcdserver/v3_server.go) string simple_token = 3; } - diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go similarity index 62% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go index a0cff8ffd70..199ee6244d5 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -4,20 +4,23 @@ package etcdserverpb import ( - context "context" - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" - authpb "github.com/coreos/etcd/auth/authpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" + + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + authpb "go.etcd.io/etcd/auth/authpb" + + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -25,12 +28,6 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type AlarmType int32 const ( @@ -44,7 +41,6 @@ var AlarmType_name = map[int32]string{ 1: "NOSPACE", 2: "CORRUPT", } - var AlarmType_value = map[string]int32{ "NONE": 0, "NOSPACE": 1, @@ -54,10 +50,7 @@ var AlarmType_value = map[string]int32{ func (x AlarmType) String() string { return proto.EnumName(AlarmType_name, int32(x)) } - -func (AlarmType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } type RangeRequest_SortOrder int32 @@ -72,7 +65,6 @@ var RangeRequest_SortOrder_name = map[int32]string{ 1: "ASCEND", 2: "DESCEND", } - var RangeRequest_SortOrder_value = map[string]int32{ "NONE": 0, "ASCEND": 1, @@ -82,10 +74,7 @@ var RangeRequest_SortOrder_value = map[string]int32{ func (x RangeRequest_SortOrder) String() string { return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) } - -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 0} -} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } type RangeRequest_SortTarget int32 @@ -104,7 +93,6 @@ var RangeRequest_SortTarget_name = map[int32]string{ 3: "MOD", 4: "VALUE", } - var RangeRequest_SortTarget_value = map[string]int32{ "KEY": 0, "VERSION": 1, @@ -116,10 +104,7 @@ var RangeRequest_SortTarget_value = map[string]int32{ func (x RangeRequest_SortTarget) String() string { return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) } - -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 1} -} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } type Compare_CompareResult int32 @@ -136,7 +121,6 @@ var Compare_CompareResult_name = map[int32]string{ 2: "LESS", 3: "NOT_EQUAL", } - var Compare_CompareResult_value = map[string]int32{ "EQUAL": 0, "GREATER": 1, @@ -147,10 +131,7 @@ var Compare_CompareResult_value = map[string]int32{ func (x Compare_CompareResult) String() string { return proto.EnumName(Compare_CompareResult_name, int32(x)) } - -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 0} -} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } type Compare_CompareTarget int32 @@ -169,7 +150,6 @@ var Compare_CompareTarget_name = map[int32]string{ 3: "VALUE", 4: "LEASE", } - var Compare_CompareTarget_value = map[string]int32{ "VERSION": 0, "CREATE": 1, @@ -181,10 +161,7 @@ var Compare_CompareTarget_value = map[string]int32{ func (x Compare_CompareTarget) String() string { return proto.EnumName(Compare_CompareTarget_name, int32(x)) } - -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 1} -} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } type WatchCreateRequest_FilterType int32 @@ -199,7 +176,6 @@ var WatchCreateRequest_FilterType_name = map[int32]string{ 0: "NOPUT", 1: "NODELETE", } - var WatchCreateRequest_FilterType_value = map[string]int32{ "NOPUT": 0, "NODELETE": 1, @@ -208,9 +184,8 @@ var WatchCreateRequest_FilterType_value = map[string]int32{ func (x WatchCreateRequest_FilterType) String() string { return proto.EnumName(WatchCreateRequest_FilterType_name, 
int32(x)) } - func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21, 0} + return fileDescriptorRpc, []int{21, 0} } type AlarmRequest_AlarmAction int32 @@ -226,7 +201,6 @@ var AlarmRequest_AlarmAction_name = map[int32]string{ 1: "ACTIVATE", 2: "DEACTIVATE", } - var AlarmRequest_AlarmAction_value = map[string]int32{ "GET": 0, "ACTIVATE": 1, @@ -236,9 +210,8 @@ var AlarmRequest_AlarmAction_value = map[string]int32{ func (x AlarmRequest_AlarmAction) String() string { return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) } - func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49, 0} + return fileDescriptorRpc, []int{54, 0} } type ResponseHeader struct { @@ -252,44 +225,13 @@ type ResponseHeader struct { // header.revision number. Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` // raft_term is the raft term when the request was applied. - RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} -func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseHeader.Merge(m, src) -} -func (m *ResponseHeader) XXX_Size() int { - return m.Size() -} -func (m *ResponseHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseHeader.DiscardUnknown(m) + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` } -var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } func (m *ResponseHeader) GetClusterId() uint64 { if m != nil { @@ -357,48 +299,17 @@ type RangeRequest struct { // greater mod revisions will be filtered away. MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. + // lesser create revisions will be filtered away. MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` // max_create_revision is the upper bound for returned key create revisions; all keys with // greater create revisions will be filtered away. 
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} -} -func (m *RangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeRequest.Merge(m, src) -} -func (m *RangeRequest) XXX_Size() int { - return m.Size() -} -func (m *RangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RangeRequest.DiscardUnknown(m) + MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` } -var xxx_messageInfo_RangeRequest proto.InternalMessageInfo +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } func (m *RangeRequest) GetKey() []byte { if m != nil { @@ -492,51 +403,20 @@ func (m *RangeRequest) GetMaxCreateRevision() int64 { } type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"` + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` // more indicates if there are more keys to return in the requested range. More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` // count is set to the number of keys within the range when requested. 
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{2} -} -func (m *RangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeResponse.Merge(m, src) -} -func (m *RangeResponse) XXX_Size() int { - return m.Size() -} -func (m *RangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RangeResponse.DiscardUnknown(m) + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` } -var xxx_messageInfo_RangeResponse proto.InternalMessageInfo +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } func (m *RangeResponse) GetHeader() *ResponseHeader { if m != nil { @@ -582,44 +462,13 @@ type PutRequest struct { IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` // If ignore_lease is set, etcd updates the key using its current lease. // Returns an error if the key does not exist. 
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{3} -} -func (m *PutRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutRequest.Merge(m, src) -} -func (m *PutRequest) XXX_Size() int { - return m.Size() -} -func (m *PutRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PutRequest.DiscardUnknown(m) + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } -var xxx_messageInfo_PutRequest proto.InternalMessageInfo +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } func (m *PutRequest) GetKey() []byte { if m != nil { @@ -664,46 +513,15 @@ func (m *PutRequest) GetIgnoreLease() bool { } type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. 
- PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{4} -} -func (m *PutResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutResponse.Merge(m, src) -} -func (m *PutResponse) XXX_Size() int { - return m.Size() -} -func (m *PutResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PutResponse.DiscardUnknown(m) + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } -var xxx_messageInfo_PutResponse proto.InternalMessageInfo +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } func (m *PutResponse) GetHeader() *ResponseHeader { if m != nil { @@ -730,44 +548,13 @@ type DeleteRangeRequest struct { RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. // The previous key-value pairs will be returned in the delete response. 
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{5} -} -func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeRequest.Merge(m, src) -} -func (m *DeleteRangeRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m) + PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } -var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } func (m *DeleteRangeRequest) GetKey() []byte { if m != nil { @@ -791,48 +578,17 @@ func (m *DeleteRangeRequest) GetPrevKv() bool { } type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` // if prev_kv is set in the request, the previous key-value pairs will be returned. 
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{6} -} -func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeResponse.Merge(m, src) -} -func (m *DeleteRangeResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m) + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` } -var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { if m != nil { @@ -863,44 +619,13 @@ type RequestOp struct { // *RequestOp_RequestPut // *RequestOp_RequestDeleteRange // *RequestOp_RequestTxn - Request isRequestOp_Request `protobuf_oneof:"request"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} -} -func (m *RequestOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestOp.Merge(m, src) -} -func (m *RequestOp) XXX_Size() int { - return m.Size() -} -func (m *RequestOp) XXX_DiscardUnknown() { - xxx_messageInfo_RequestOp.DiscardUnknown(m) + Request isRequestOp_Request `protobuf_oneof:"request"` } -var xxx_messageInfo_RequestOp proto.InternalMessageInfo +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } type isRequestOp_Request interface { isRequestOp_Request() @@ -909,16 +634,16 @@ type isRequestOp_Request interface { } type RequestOp_RequestRange struct { - RequestRange *RangeRequest 
`protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"` + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` } type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"` + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` } type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"` + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` } type RequestOp_RequestTxn struct { - RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"` + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` } func (*RequestOp_RequestRange) isRequestOp_Request() {} @@ -1048,22 +773,22 @@ func _RequestOp_OneofSizer(msg proto.Message) (n int) { switch x := m.Request.(type) { case *RequestOp_RequestRange: s := proto.Size(x.RequestRange) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestPut: s := proto.Size(x.RequestPut) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestDeleteRange: s := proto.Size(x.RequestDeleteRange) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestTxn: s := proto.Size(x.RequestTxn) - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -1081,44 +806,13 @@ type ResponseOp struct { // *ResponseOp_ResponsePut // *ResponseOp_ResponseDeleteRange // *ResponseOp_ResponseTxn - Response isResponseOp_Response `protobuf_oneof:"response"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{8} -} -func (m *ResponseOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseOp.Merge(m, src) -} -func (m *ResponseOp) XXX_Size() int { - return m.Size() -} -func (m *ResponseOp) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseOp.DiscardUnknown(m) + Response isResponseOp_Response `protobuf_oneof:"response"` } -var xxx_messageInfo_ResponseOp proto.InternalMessageInfo +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } type isResponseOp_Response interface { isResponseOp_Response() @@ -1127,16 +821,16 @@ type isResponseOp_Response interface { } type ResponseOp_ResponseRange struct { 
- ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"` + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` } type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"` + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` } type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"` + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` } type ResponseOp_ResponseTxn struct { - ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"` + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` } func (*ResponseOp_ResponseRange) isResponseOp_Response() {} @@ -1266,22 +960,22 @@ func _ResponseOp_OneofSizer(msg proto.Message) (n int) { switch x := m.Response.(type) { case *ResponseOp_ResponseRange: s := proto.Size(x.ResponseRange) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponsePut: s := proto.Size(x.ResponsePut) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponseDeleteRange: s := proto.Size(x.ResponseDeleteRange) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponseTxn: s := proto.Size(x.ResponseTxn) - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -1307,44 +1001,13 @@ type Compare struct { TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` // range_end compares the given target to all keys in the range [key, range_end). // See RangeRequest for more details on key ranges. 
- RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9} -} -func (m *Compare) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Compare.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Compare) XXX_Merge(src proto.Message) { - xxx_messageInfo_Compare.Merge(m, src) -} -func (m *Compare) XXX_Size() int { - return m.Size() -} -func (m *Compare) XXX_DiscardUnknown() { - xxx_messageInfo_Compare.DiscardUnknown(m) + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } -var xxx_messageInfo_Compare proto.InternalMessageInfo +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } type isCompare_TargetUnion interface { isCompare_TargetUnion() @@ -1529,20 +1192,20 @@ func _Compare_OneofSizer(msg proto.Message) (n int) { // target_union switch x := m.TargetUnion.(type) { case *Compare_Version: - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Version)) case *Compare_CreateRevision: - n += 1 // tag and wire + n += proto.SizeVarint(5<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.CreateRevision)) case *Compare_ModRevision: - n += 1 // tag and wire + n += proto.SizeVarint(6<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ModRevision)) case *Compare_Value: - n += 1 // tag and wire + n += proto.SizeVarint(7<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Value))) n += len(x.Value) case *Compare_Lease: - n += 1 // tag and wire + n += proto.SizeVarint(8<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Lease)) case nil: default: @@ -1572,48 +1235,17 @@ type TxnRequest struct { // and the response will contain their respective responses in order. // If the comparisons fail, then the failure requests will be processed in order, // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"` + Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"` + Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` // failure is a list of requests which will be applied when compare evaluates to false. 
- Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{10} -} -func (m *TxnRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnRequest.Merge(m, src) -} -func (m *TxnRequest) XXX_Size() int { - return m.Size() -} -func (m *TxnRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TxnRequest.DiscardUnknown(m) + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` } -var xxx_messageInfo_TxnRequest proto.InternalMessageInfo +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } func (m *TxnRequest) GetCompare() []*Compare { if m != nil { @@ -1637,49 +1269,18 @@ func (m *TxnRequest) GetFailure() []*RequestOp { } type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // succeeded is set to true if the compare evaluated to true or false otherwise. Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` // responses is a list of responses corresponding to the results from applying // success if succeeded is true or failure if succeeded is false. 
- Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{11} -} -func (m *TxnResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnResponse.Merge(m, src) -} -func (m *TxnResponse) XXX_Size() int { - return m.Size() -} -func (m *TxnResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TxnResponse.DiscardUnknown(m) + Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` } -var xxx_messageInfo_TxnResponse proto.InternalMessageInfo +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } func (m *TxnResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1710,44 +1311,13 @@ type CompactionRequest struct { // physical is set so the RPC will wait until the compaction is physically // applied to the local database such that compacted entries are totally // removed from the backend database. 
- Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{12} -} -func (m *CompactionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionRequest.Merge(m, src) -} -func (m *CompactionRequest) XXX_Size() int { - return m.Size() -} -func (m *CompactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionRequest.DiscardUnknown(m) + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` } -var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } func (m *CompactionRequest) GetRevision() int64 { if m != nil { @@ -1764,44 +1334,13 @@ func (m *CompactionRequest) GetPhysical() bool { } type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{13} -} -func (m *CompactionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionResponse.Merge(m, src) -} -func (m *CompactionResponse) XXX_Size() int { - return m.Size() -} -func (m *CompactionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } func (m *CompactionResponse) GetHeader() *ResponseHeader { if m 
!= nil { @@ -1811,84 +1350,22 @@ func (m *CompactionResponse) GetHeader() *ResponseHeader { } type HashRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{14} -} -func (m *HashRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashRequest.Merge(m, src) -} -func (m *HashRequest) XXX_Size() int { - return m.Size() -} -func (m *HashRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashRequest.DiscardUnknown(m) } -var xxx_messageInfo_HashRequest proto.InternalMessageInfo +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } type HashKVRequest struct { // revision is the key-value store revision for the hash operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } -func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } -func (*HashKVRequest) ProtoMessage() {} -func (*HashKVRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{15} -} -func (m *HashKVRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVRequest.Merge(m, src) -} -func (m *HashKVRequest) XXX_Size() int { - return m.Size() -} -func (m *HashKVRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVRequest.DiscardUnknown(m) + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` } -var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } func (m *HashKVRequest) GetRevision() int64 { if m != nil { @@ -1898,48 +1375,17 @@ func (m *HashKVRequest) GetRevision() int64 { } type HashKVResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // hash is the hash value computed from the responding member's 
MVCC keys up to a given revision. Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } -func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } -func (*HashKVResponse) ProtoMessage() {} -func (*HashKVResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{16} -} -func (m *HashKVResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVResponse.Merge(m, src) -} -func (m *HashKVResponse) XXX_Size() int { - return m.Size() -} -func (m *HashKVResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVResponse.DiscardUnknown(m) + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` } -var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } func (m *HashKVResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1963,46 +1409,15 @@ func (m *HashKVResponse) GetCompactRevision() int64 { } type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // hash is the hash value computed from the responding member's KV's backend. 
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{17} -} -func (m *HashResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashResponse.Merge(m, src) -} -func (m *HashResponse) XXX_Size() int { - return m.Size() -} -func (m *HashResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashResponse.DiscardUnknown(m) + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` } -var xxx_messageInfo_HashResponse proto.InternalMessageInfo +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } func (m *HashResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2019,89 +1434,27 @@ func (m *HashResponse) GetHash() uint32 { } type SnapshotRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{18} -} -func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotRequest.Merge(m, src) -} -func (m *SnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *SnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotRequest.DiscardUnknown(m) } -var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } type SnapshotResponse struct { // header has the current key-value store information. The first header in the snapshot // stream indicates the point in time of the snapshot. 
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // remaining_bytes is the number of blob bytes to be sent after this message RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` // blob contains the next chunk of the snapshot in the snapshot stream. - Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{19} -} -func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotResponse.Merge(m, src) -} -func (m *SnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *SnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotResponse.DiscardUnknown(m) + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` } -var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } func (m *SnapshotResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2131,44 +1484,13 @@ type WatchRequest struct { // *WatchRequest_CreateRequest // *WatchRequest_CancelRequest // *WatchRequest_ProgressRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{20} -} -func (m *WatchRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchRequest.Merge(m, src) -} -func (m *WatchRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchRequest.DiscardUnknown(m) + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` } -var xxx_messageInfo_WatchRequest proto.InternalMessageInfo +func (m 
*WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } type isWatchRequest_RequestUnion interface { isWatchRequest_RequestUnion() @@ -2177,13 +1499,13 @@ type isWatchRequest_RequestUnion interface { } type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"` + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` } type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"` + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` } type WatchRequest_ProgressRequest struct { - ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"` + ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,oneof"` } func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} @@ -2291,17 +1613,17 @@ func _WatchRequest_OneofSizer(msg proto.Message) (n int) { switch x := m.RequestUnion.(type) { case *WatchRequest_CreateRequest: s := proto.Size(x.CreateRequest) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *WatchRequest_CancelRequest: s := proto.Size(x.CancelRequest) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *WatchRequest_ProgressRequest: s := proto.Size(x.ProgressRequest) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -2328,7 +1650,7 @@ type WatchCreateRequest struct { // The etcd server may decide how often it will send notifications based on current load. ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` // If prev_kv is set, created watcher gets the previous KV before the event happens. // If the previous KV is already compacted, nothing will be returned. PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` @@ -2339,44 +1661,13 @@ type WatchCreateRequest struct { // use on the stream will cause an error to be returned. WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` // fragment enables splitting large revisions into multiple watch responses. 
- Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21} -} -func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCreateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCreateRequest.Merge(m, src) -} -func (m *WatchCreateRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCreateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m) + Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` } -var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } func (m *WatchCreateRequest) GetKey() []byte { if m != nil { @@ -2436,44 +1727,13 @@ func (m *WatchCreateRequest) GetFragment() bool { type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. 
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{22} -} -func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCancelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCancelRequest.Merge(m, src) -} -func (m *WatchCancelRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCancelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m) + WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` } -var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } func (m *WatchCancelRequest) GetWatchId() int64 { if m != nil { @@ -2485,46 +1745,15 @@ func (m *WatchCancelRequest) GetWatchId() int64 { // Requests the a watch stream progress status be sent in the watch response stream as soon as // possible. 
type WatchProgressRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } -func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } -func (*WatchProgressRequest) ProtoMessage() {} -func (*WatchProgressRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{23} -} -func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchProgressRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchProgressRequest.Merge(m, src) -} -func (m *WatchProgressRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchProgressRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m) } -var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo +func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } +func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } +func (*WatchProgressRequest) ProtoMessage() {} +func (*WatchProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` // created is set to true if the response is for a create watch request. @@ -2547,45 +1776,14 @@ type WatchResponse struct { // cancel_reason indicates the reason for canceling the watcher. CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` // framgment is true if large watch response was split over multiple responses. 
- Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{24} -} -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) -} -func (m *WatchResponse) XXX_Size() int { - return m.Size() -} -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) + Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } func (m *WatchResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2647,44 +1845,13 @@ type LeaseGrantRequest struct { // TTL is the advisory time-to-live in seconds. Expired lease will return -1. TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{25} -} -func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantRequest.Merge(m, src) -} -func (m *LeaseGrantRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m) + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } func (m *LeaseGrantRequest) GetTTL() int64 { if m != nil { @@ -2701,49 +1868,18 @@ func (m *LeaseGrantRequest) GetID() int64 { } type LeaseGrantResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the server chosen lease time-to-live in seconds. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{26} -} -func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantResponse.Merge(m, src) -} -func (m *LeaseGrantResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m) + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` } -var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2775,93 +1911,89 @@ func (m *LeaseGrantResponse) GetError() string { type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{27} -} -func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID } + return 0 } -func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeRequest.Merge(m, src) + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -func (m *LeaseRevokeRequest) XXX_Size() int { - return m.Size() + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil } -func (m *LeaseRevokeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m) + +type LeaseCheckpoint struct { + // ID is the lease ID to checkpoint. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Remaining_TTL is the remaining time until expiry of the lease. 
+ Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` } -var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } -func (m *LeaseRevokeRequest) GetID() int64 { +func (m *LeaseCheckpoint) GetID() int64 { if m != nil { return m.ID } return 0 } -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{28} -} -func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.Remaining_TTL } + return 0 } -func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeResponse.Merge(m, src) + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` } -func (m *LeaseRevokeResponse) XXX_Size() int { - return m.Size() + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil } -func (m *LeaseRevokeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m) + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { if m != nil { return m.Header } @@ -2870,44 +2002,13 @@ func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { type LeaseKeepAliveRequest struct { // ID is the lease ID for the lease to keep alive. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{29} -} -func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src) -} -func (m *LeaseKeepAliveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m) + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } func (m *LeaseKeepAliveRequest) GetID() int64 { if m != nil { @@ -2917,48 +2018,17 @@ func (m *LeaseKeepAliveRequest) GetID() int64 { } type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the new time-to-live for the lease. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{30} -} -func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src) -} -func (m *LeaseKeepAliveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m) + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` } -var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2985,44 +2055,13 @@ type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // keys is true to query all the keys attached to this lease. 
- Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{31} -} -func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src) -} -func (m *LeaseTimeToLiveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m) + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` } -var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } func (m *LeaseTimeToLiveRequest) GetID() int64 { if m != nil { @@ -3039,7 +2078,7 @@ func (m *LeaseTimeToLiveRequest) GetKeys() bool { } type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. @@ -3047,44 +2086,13 @@ type LeaseTimeToLiveResponse struct { // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` // Keys is the list of keys attached to this lease. 
- Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{32} -} -func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src) -} -func (m *LeaseTimeToLiveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m) + Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` } -var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3122,83 +2130,21 @@ func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { } type LeaseLeasesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } -func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesRequest) ProtoMessage() {} -func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{33} -} -func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesRequest.Merge(m, src) -} -func (m *LeaseLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m) } -var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } type LeaseStatus struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} 
`json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } -func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } -func (*LeaseStatus) ProtoMessage() {} -func (*LeaseStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{34} -} -func (m *LeaseStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseStatus.Merge(m, src) -} -func (m *LeaseStatus) XXX_Size() int { - return m.Size() -} -func (m *LeaseStatus) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseStatus.DiscardUnknown(m) + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } func (m *LeaseStatus) GetID() int64 { if m != nil { @@ -3208,45 +2154,14 @@ func (m *LeaseStatus) GetID() int64 { } type LeaseLeasesResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } -func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesResponse) ProtoMessage() {} -func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{35} -} -func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesResponse.Merge(m, src) -} -func (m *LeaseLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` } -var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3268,46 +2183,17 @@ type Member struct { // name is the 
human-readable name of the member. If the member is not started, the name will be an empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{36} -} -func (m *Member) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Member.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Member) XXX_Merge(src proto.Message) { - xxx_messageInfo_Member.Merge(m, src) -} -func (m *Member) XXX_Size() int { - return m.Size() -} -func (m *Member) XXX_DiscardUnknown() { - xxx_messageInfo_Member.DiscardUnknown(m) + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` } -var xxx_messageInfo_Member proto.InternalMessageInfo +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } func (m *Member) GetID() uint64 { if m != nil { @@ -3337,46 +2223,24 @@ func (m *Member) GetClientURLs() []string { return nil } -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. 
- PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{37} -} -func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner } + return false } -func (m *MemberAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddRequest.Merge(m, src) -} -func (m *MemberAddRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddRequest.DiscardUnknown(m) + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` + // isLearner indicates if the added member is raft learner. + IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` } -var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } func (m *MemberAddRequest) GetPeerURLs() []string { if m != nil { @@ -3385,49 +2249,25 @@ func (m *MemberAddRequest) GetPeerURLs() []string { return nil } +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"` + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` // members is a list of all members after adding the new member. 
- Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{38} -} -func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddResponse.Merge(m, src) -} -func (m *MemberAddResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } func (m *MemberAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3452,44 +2292,13 @@ func (m *MemberAddResponse) GetMembers() []*Member { type MemberRemoveRequest struct { // ID is the member ID of the member to remove. 
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{39} -} -func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveRequest.Merge(m, src) -} -func (m *MemberRemoveRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m) + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } func (m *MemberRemoveRequest) GetID() uint64 { if m != nil { @@ -3499,46 +2308,15 @@ func (m *MemberRemoveRequest) GetID() uint64 { } type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after removing the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{40} -} -func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveResponse.Merge(m, src) -} -func (m *MemberRemoveResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3558,44 +2336,13 @@ type MemberUpdateRequest struct { // ID is the member ID of the member to update. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{41} -} -func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateRequest.Merge(m, src) -} -func (m *MemberUpdateRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m) + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` } -var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } func (m *MemberUpdateRequest) GetID() uint64 { if m != nil { @@ -3612,46 +2359,15 @@ func (m *MemberUpdateRequest) GetPeerURLs() []string { } type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after updating the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{42} -} -func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateResponse.Merge(m, src) -} -func (m *MemberUpdateResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3668,85 +2384,23 @@ func (m *MemberUpdateResponse) GetMembers() []*Member { } type MemberListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{43} -} -func (m *MemberListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListRequest.Merge(m, src) -} -func (m *MemberListRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListRequest.DiscardUnknown(m) } -var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" 
json:"header,omitempty"` // members is a list of all members associated with the cluster. - Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{44} -} -func (m *MemberListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListResponse.Merge(m, src) -} -func (m *MemberListResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } func (m *MemberListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3762,84 +2416,64 @@ func (m *MemberListResponse) GetMembers() []*Member { return nil } -type DefragmentRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{45} -} -func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DefragmentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentRequest.Merge(m, src) +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -func (m *DefragmentRequest) XXX_Size() int { - return m.Size() + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 } -func (m *DefragmentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentRequest.DiscardUnknown(m) + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after promoting the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{46} -} -func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header } + return nil } -func (m *DefragmentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentResponse.Merge(m, src) + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil } -func (m *DefragmentResponse) XXX_Size() int { - return m.Size() + +type DefragmentRequest struct { } -func (m *DefragmentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentResponse.DiscardUnknown(m) + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{51} } func (m *DefragmentResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3850,44 +2484,13 @@ func (m *DefragmentResponse) GetHeader() *ResponseHeader { type MoveLeaderRequest struct { // targetID is the node ID for the new leader. - TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } -func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderRequest) ProtoMessage() {} -func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{47} -} -func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderRequest.Merge(m, src) -} -func (m *MoveLeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m) + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` } -var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } func (m *MoveLeaderRequest) GetTargetID() uint64 { if m != nil { @@ -3897,44 +2500,13 @@ func (m *MoveLeaderRequest) GetTargetID() uint64 { } type MoveLeaderResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } -func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderResponse) ProtoMessage() {} -func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{48} -} -func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderResponse.Merge(m, src) -} -func (m *MoveLeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m 
*MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3952,44 +2524,13 @@ type AlarmRequest struct { // alarm request covers all members. MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` // alarm is the type of alarm to consider for this request. - Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49} -} -func (m *AlarmRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmRequest.Merge(m, src) -} -func (m *AlarmRequest) XXX_Size() int { - return m.Size() -} -func (m *AlarmRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmRequest.DiscardUnknown(m) + Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` } -var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { if m != nil { @@ -4016,44 +2557,13 @@ type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` // alarm is the type of alarm which has been raised. 
- Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{50} -} -func (m *AlarmMember) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmMember) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmMember.Merge(m, src) -} -func (m *AlarmMember) XXX_Size() int { - return m.Size() -} -func (m *AlarmMember) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmMember.DiscardUnknown(m) + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` } -var xxx_messageInfo_AlarmMember proto.InternalMessageInfo +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } func (m *AlarmMember) GetMemberID() uint64 { if m != nil { @@ -4070,46 +2580,15 @@ func (m *AlarmMember) GetAlarm() AlarmType { } type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{51} -} -func (m *AlarmResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmResponse.Merge(m, src) -} -func (m *AlarmResponse) XXX_Size() int { - return m.Size() -} -func (m *AlarmResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmResponse.DiscardUnknown(m) + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` } -var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } func (m *AlarmResponse) GetHeader() *ResponseHeader { if m != nil { @@ -4126,93 +2605,39 @@ func (m *AlarmResponse) GetAlarms() []*AlarmMember { } type StatusRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{52} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) } -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // version is the cluster protocol version used by the responding member. 
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` // leader is the member ID which the responding member believes is the current leader. Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. + // raftIndex is the current raft committed index of the responding member. RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` // raftTerm is the current raft term of the responding member. - RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{53} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. 
+ IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } func (m *StatusResponse) GetHeader() *ResponseHeader { if m != nil { @@ -4256,124 +2681,59 @@ func (m *StatusResponse) GetRaftTerm() uint64 { return 0 } -type AuthEnableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54} -} -func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex } -} -func (m *AuthEnableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableRequest.Merge(m, src) -} -func (m *AuthEnableRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m) + return 0 } -var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} -type AuthDisableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{55} -} -func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse } + return 0 } -func (m *AuthDisableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableRequest.Merge(m, src) + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false } -func (m *AuthDisableRequest) XXX_Size() int { - return m.Size() + +type AuthEnableRequest struct { } -func (m *AuthDisableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m) + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func 
(*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } + +type AuthDisableRequest struct { } -var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{56} -} -func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateRequest.Merge(m, src) -} -func (m *AuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` } -var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo +func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } func (m *AuthenticateRequest) GetName() string { if m != nil { @@ -4390,45 +2750,15 @@ func (m *AuthenticateRequest) GetPassword() string { } type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57} -} -func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func 
(m *AuthUserAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddRequest.Merge(m, src) -} -func (m *AuthUserAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` } -var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } func (m *AuthUserAddRequest) GetName() string { if m != nil { @@ -4444,45 +2774,21 @@ func (m *AuthUserAddRequest) GetPassword() string { return "" } -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{58} -} -func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options } + return nil } -func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetRequest.Merge(m, src) -} -func (m *AuthUserGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m) + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } func (m *AuthUserGetRequest) GetName() string { if m != nil { @@ -4493,44 +2799,13 @@ func (m *AuthUserGetRequest) GetName() string { type AuthUserDeleteRequest struct { // name is the name of the user to delete. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{59} -} -func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src) -} -func (m *AuthUserDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } func (m *AuthUserDeleteRequest) GetName() string { if m != nil { @@ -4543,45 +2818,16 @@ type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // password is the new password for the user. 
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` } func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordRequest) ProtoMessage() {} func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{60} -} -func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src) -} -func (m *AuthUserChangePasswordRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m) + return fileDescriptorRpc, []int{65} } -var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo - func (m *AuthUserChangePasswordRequest) GetName() string { if m != nil { return m.Name @@ -4600,44 +2846,13 @@ type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` // role is the name of the role to grant to the user. 
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{61} -} -func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src) -} -func (m *AuthUserGrantRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } func (m *AuthUserGrantRoleRequest) GetUser() string { if m != nil { @@ -4654,45 +2869,14 @@ func (m *AuthUserGrantRoleRequest) GetRole() string { } type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62} -} -func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src) -} -func (m *AuthUserRevokeRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo 
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } func (m *AuthUserRevokeRoleRequest) GetName() string { if m != nil { @@ -4710,44 +2894,13 @@ func (m *AuthUserRevokeRoleRequest) GetRole() string { type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{63} -} -func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddRequest.Merge(m, src) -} -func (m *AuthRoleAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } func (m *AuthRoleAddRequest) GetName() string { if m != nil { @@ -4757,44 +2910,13 @@ func (m *AuthRoleAddRequest) GetName() string { } type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64} -} -func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetRequest.Merge(m, src) -} -func (m *AuthRoleGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } func (m *AuthRoleGetRequest) GetRole() string { if m != nil { @@ -4804,122 +2926,29 @@ func (m *AuthRoleGetRequest) GetRole() string { } type AuthUserListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{65} -} -func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListRequest.Merge(m, src) -} -func (m *AuthUserListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m) } -var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } type AuthRoleListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{66} -} -func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListRequest.Merge(m, src) -} -func (m *AuthRoleListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m) } -var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return 
proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{67} -} -func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src) -} -func (m *AuthRoleDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } func (m *AuthRoleDeleteRequest) GetRole() string { if m != nil { @@ -4932,44 +2961,15 @@ type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // perm is the permission to grant to the role. 
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` } func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{68} -} -func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return fileDescriptorRpc, []int{73} } -func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo func (m *AuthRoleGrantPermissionRequest) GetName() string { if m != nil { @@ -4986,47 +2986,18 @@ func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { } type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{69} -} -func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m) + return fileDescriptorRpc, []int{74} } -var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo - func (m *AuthRoleRevokePermissionRequest) GetRole() string { if m != nil { return m.Role @@ -5034,59 +3005,28 @@ func (m *AuthRoleRevokePermissionRequest) GetRole() string { return "" } -func (m *AuthRoleRevokePermissionRequest) GetKey() string { +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { if m != nil { return m.Key } - return "" + return nil } -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { if m != nil { return m.RangeEnd } - return "" + return nil } type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{70} -} -func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthEnableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableResponse.Merge(m, src) -} -func (m *AuthEnableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } func (m *AuthEnableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5096,44 +3036,13 @@ func (m *AuthEnableResponse) GetHeader() *ResponseHeader { } type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{71} -} -func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - 
return b[:n], nil - } -} -func (m *AuthDisableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableResponse.Merge(m, src) -} -func (m *AuthDisableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthDisableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } func (m *AuthDisableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5143,46 +3052,15 @@ func (m *AuthDisableResponse) GetHeader() *ResponseHeader { } type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72} -} -func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateResponse.Merge(m, src) -} -func (m *AuthenticateResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m) + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` } -var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } func (m *AuthenticateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5199,44 +3077,13 @@ func (m *AuthenticateResponse) GetToken() string { } type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() 
([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{73} -} -func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddResponse.Merge(m, src) -} -func (m *AuthUserAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5246,45 +3093,14 @@ func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { } type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74} -} -func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetResponse.Merge(m, src) -} -func (m *AuthUserGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` } -var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5301,44 +3117,13 @@ func (m *AuthUserGetResponse) GetRoles() []string { } type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" 
json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{75} -} -func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src) -} -func (m *AuthUserDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5348,44 +3133,15 @@ func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { } type AuthUserChangePasswordResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordResponse) ProtoMessage() {} func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{76} -} -func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src) -} -func (m *AuthUserChangePasswordResponse) XXX_Size() int { - return m.Size() + return fileDescriptorRpc, []int{81} } -func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { if m != nil { @@ 
-5395,44 +3151,13 @@ func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { } type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{77} -} -func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src) -} -func (m *AuthUserGrantRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5442,44 +3167,13 @@ func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { } type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{78} -} -func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src) -} -func (m *AuthUserRevokeRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" 
json:"header,omitempty"` } -var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{83} } func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5489,44 +3183,13 @@ func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { } type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{79} -} -func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddResponse.Merge(m, src) -} -func (m *AuthRoleAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{84} } func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5536,45 +3199,14 @@ func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { } type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{80} -} -func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} 
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetResponse.Merge(m, src) -} -func (m *AuthRoleGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` } -var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{85} } func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5591,45 +3223,14 @@ func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { } type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{81} -} -func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListResponse.Merge(m, src) -} -func (m *AuthRoleListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` } -var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{86} } func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5646,45 +3247,14 @@ func (m *AuthRoleListResponse) GetRoles() []string { } type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func 
(*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{82} -} -func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListResponse.Merge(m, src) -} -func (m *AuthUserListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` } -var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{87} } func (m *AuthUserListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5701,44 +3271,13 @@ func (m *AuthUserListResponse) GetUsers() []string { } type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{83} -} -func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src) -} -func (m *AuthRoleDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{88} } func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5748,45 +3287,16 @@ func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { } type AuthRoleGrantPermissionResponse struct 
{ - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{84} -} -func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m) + return fileDescriptorRpc, []int{89} } -var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo - func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { if m != nil { return m.Header @@ -5795,44 +3305,15 @@ func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { } type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{85} -} -func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Size() int { - return m.Size() + return fileDescriptorRpc, []int{90} } -func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5842,13 +3323,6 @@ 
func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { } func init() { - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") @@ -5878,6 +3352,9 @@ func init() { proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") @@ -5894,6 +3371,8 @@ func init() { proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") @@ -5935,244 +3414,13 @@ func init() { proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") -} - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } - -var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 3711 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47, - 0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd, - 0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4, - 0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 
0x00, 0x22, - 0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff, - 0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6, - 0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf, - 0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9, - 0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d, - 0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd, - 0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f, - 0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6, - 0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f, - 0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f, - 0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf, - 0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9, - 0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3, - 0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c, - 0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42, - 0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2, - 0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde, - 0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae, - 0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c, - 0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f, - 0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23, - 0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d, - 0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62, - 0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6, - 0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21, - 0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90, - 0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8, - 0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97, - 0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67, - 0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8, - 0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1, - 0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d, - 0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95, - 0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64, - 0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17, - 0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d, - 0xda, 0x24, 
0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61, - 0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a, - 0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62, - 0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19, - 0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93, - 0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2, - 0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51, - 0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24, - 0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13, - 0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d, - 0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc, - 0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32, - 0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa, - 0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34, - 0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92, - 0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43, - 0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05, - 0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a, - 0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0, - 0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53, - 0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2, - 0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75, - 0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42, - 0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90, - 0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e, - 0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15, - 0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8, - 0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a, - 0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9, - 0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e, - 0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64, - 0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c, - 0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72, - 0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c, - 0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74, - 0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41, - 0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 
0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d, - 0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac, - 0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3, - 0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00, - 0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0, - 0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2, - 0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08, - 0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c, - 0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58, - 0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71, - 0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7, - 0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72, - 0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12, - 0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02, - 0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0, - 0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf, - 0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9, - 0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38, - 0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35, - 0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e, - 0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f, - 0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2, - 0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65, - 0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03, - 0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89, - 0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c, - 0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26, - 0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46, - 0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5, - 0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd, - 0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90, - 0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c, - 0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d, - 0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba, - 0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9, - 0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8, - 0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 
0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf, - 0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e, - 0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5, - 0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b, - 0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56, - 0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2, - 0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd, - 0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb, - 0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27, - 0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63, - 0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2, - 0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73, - 0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4, - 0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13, - 0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40, - 0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b, - 0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde, - 0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76, - 0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f, - 0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4, - 0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7, - 0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc, - 0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef, - 0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f, - 0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff, - 0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d, - 0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf, - 0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd, - 0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c, - 0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a, - 0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62, - 0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28, - 0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4, - 0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28, - 0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23, - 0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc, - 0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 
0x3a, 0xda, - 0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad, - 0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60, - 0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef, - 0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa, - 0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b, - 0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56, - 0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c, - 0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c, - 0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae, - 0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f, - 0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74, - 0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20, - 0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f, - 0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3, - 0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a, - 0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56, - 0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f, - 0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6, - 0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce, - 0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4, - 0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14, - 0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c, - 0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e, - 0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc, - 0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac, - 0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9, - 0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a, - 0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8, - 0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2, - 0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14, - 0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31, - 0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca, - 0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0, - 0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89, - 0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8, - 0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97, - 0x21, 0x00, 
0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc, - 0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0, - 0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4, - 0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55, - 0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c, - 0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0, - 0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f, - 0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52, - 0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a, - 0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02, - 0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85, - 0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99, - 0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11, - 0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d, - 0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1, - 0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0, - 0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06, - 0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe, - 0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27, - 0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf, - 0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b, - 0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a, - 0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89, - 0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae, - 0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e, - 0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33, - 0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54, - 0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88, - 0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e, - 0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87, - 0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f, - 0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34, - 0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2, - 0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2, - 0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4, - 0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca, - 0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 
0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff, - 0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9, - 0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86, - 0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1, - 0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71, - 0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e, - 0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc, - 0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b, - 0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6, - 0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd, - 0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00, + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) } // Reference imports to suppress errors if they are not otherwise used. @@ -6183,9 +3431,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// KVClient is the client API for KV service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for KV service + type KVClient interface { // Range gets the keys in the range from the key-value store. Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) @@ -6218,7 +3465,7 @@ func NewKVClient(cc *grpc.ClientConn) KVClient { func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { out := new(RangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6227,7 +3474,7 @@ func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.Cal func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { out := new(PutResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -6236,7 +3483,7 @@ func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOpt func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { out := new(DeleteRangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6245,7 +3492,7 @@ func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { out := new(TxnResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6254,14 +3501,15 @@ func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOpt func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { out := new(CompactionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// KVServer is the server API for KV service. +// Server API for KV service + type KVServer interface { // Range gets the keys in the range from the key-value store. Range(context.Context, *RangeRequest) (*RangeResponse, error) @@ -6284,26 +3532,6 @@ type KVServer interface { Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) } -// UnimplementedKVServer can be embedded to have forward compatible implementations. -type UnimplementedKVServer struct { -} - -func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") -} -func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") -} -func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented") -} -func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented") -} -func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") -} - func RegisterKVServer(s *grpc.Server, srv KVServer) { s.RegisterService(&_KV_serviceDesc, srv) } @@ -6427,9 +3655,8 @@ var _KV_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// WatchClient is the client API for Watch service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Watch service + type WatchClient interface { // Watch watches for events happening or that have happened. 
Both input and output // are streams; the input stream is for creating and canceling watchers and the output @@ -6448,7 +3675,7 @@ func NewWatchClient(cc *grpc.ClientConn) WatchClient { } func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...) + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) if err != nil { return nil, err } @@ -6478,7 +3705,8 @@ func (x *watchWatchClient) Recv() (*WatchResponse, error) { return m, nil } -// WatchServer is the server API for Watch service. +// Server API for Watch service + type WatchServer interface { // Watch watches for events happening or that have happened. Both input and output // are streams; the input stream is for creating and canceling watchers and the output @@ -6488,14 +3716,6 @@ type WatchServer interface { Watch(Watch_WatchServer) error } -// UnimplementedWatchServer can be embedded to have forward compatible implementations. -type UnimplementedWatchServer struct { -} - -func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} - func RegisterWatchServer(s *grpc.Server, srv WatchServer) { s.RegisterService(&_Watch_serviceDesc, srv) } @@ -6541,9 +3761,8 @@ var _Watch_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// LeaseClient is the client API for Lease service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Lease service + type LeaseClient interface { // LeaseGrant creates a lease which expires if the server does not receive a keepAlive // within a given time to live period. All keys attached to the lease will be expired and @@ -6570,7 +3789,7 @@ func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { out := new(LeaseGrantResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6579,7 +3798,7 @@ func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opt func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { out := new(LeaseRevokeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6587,7 +3806,7 @@ func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, o } func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...) + stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
if err != nil { return nil, err } @@ -6619,7 +3838,7 @@ func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { out := new(LeaseTimeToLiveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6628,14 +3847,15 @@ func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRe func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { out := new(LeaseLeasesResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// LeaseServer is the server API for Lease service. +// Server API for Lease service + type LeaseServer interface { // LeaseGrant creates a lease which expires if the server does not receive a keepAlive // within a given time to live period. All keys attached to the lease will be expired and @@ -6652,26 +3872,6 @@ type LeaseServer interface { LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) } -// UnimplementedLeaseServer can be embedded to have forward compatible implementations. -type UnimplementedLeaseServer struct { -} - -func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented") -} -func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented") -} -func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error { - return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented") -} -func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented") -} -func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented") -} - func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { s.RegisterService(&_Lease_serviceDesc, srv) } @@ -6806,9 +4006,8 @@ var _Lease_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// ClusterClient is the client API for Cluster service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Cluster service + type ClusterClient interface { // MemberAdd adds a member into the cluster. MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) @@ -6818,6 +4017,8 @@ type ClusterClient interface { MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) // MemberList lists all the members in the cluster. 
MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) } type clusterClient struct { @@ -6830,7 +4031,7 @@ func NewClusterClient(cc *grpc.ClientConn) ClusterClient { func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { out := new(MemberAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6839,7 +4040,7 @@ func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opt func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { out := new(MemberRemoveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6848,7 +4049,7 @@ func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveReques func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { out := new(MemberUpdateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6857,14 +4058,24 @@ func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateReques func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { out := new(MemberListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// ClusterServer is the server API for Cluster service. +// Server API for Cluster service + type ClusterServer interface { // MemberAdd adds a member into the cluster. MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) @@ -6874,23 +4085,8 @@ type ClusterServer interface { MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) // MemberList lists all the members in the cluster. MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) -} - -// UnimplementedClusterServer can be embedded to have forward compatible implementations. 
-type UnimplementedClusterServer struct { -} - -func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented") -} -func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented") -} -func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented") -} -func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented") + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) } func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { @@ -6969,6 +4165,24 @@ func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Cluster_serviceDesc = grpc.ServiceDesc{ ServiceName: "etcdserverpb.Cluster", HandlerType: (*ClusterServer)(nil), @@ -6989,14 +4203,17 @@ var _Cluster_serviceDesc = grpc.ServiceDesc{ MethodName: "MemberList", Handler: _Cluster_MemberList_Handler, }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "rpc.proto", } -// MaintenanceClient is the client API for Maintenance service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Maintenance service + type MaintenanceClient interface { // Alarm activates, deactivates, and queries alarms regarding cluster health. Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) @@ -7004,11 +4221,15 @@ type MaintenanceClient interface { Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! 
+ // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) @@ -7026,7 +4247,7 @@ func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { out := new(AlarmResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7035,7 +4256,7 @@ func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts .. func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7044,7 +4265,7 @@ func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { out := new(DefragmentResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7053,7 +4274,7 @@ func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentReques func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { out := new(HashResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7062,7 +4283,7 @@ func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...g func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { out := new(HashKVResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7070,7 +4291,7 @@ func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts } func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...) + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) 
if err != nil { return nil, err } @@ -7103,14 +4324,15 @@ func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { out := new(MoveLeaderResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// MaintenanceServer is the server API for Maintenance service. +// Server API for Maintenance service + type MaintenanceServer interface { // Alarm activates, deactivates, and queries alarms regarding cluster health. Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) @@ -7118,11 +4340,15 @@ type MaintenanceServer interface { Status(context.Context, *StatusRequest) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. Hash(context.Context, *HashRequest) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error @@ -7130,32 +4356,6 @@ type MaintenanceServer interface { MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) } -// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMaintenanceServer struct { -} - -func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented") -} -func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented") -} -func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented") -} -func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented") -} -func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error { - return status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} -func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented") -} - func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { s.RegisterService(&_Maintenance_serviceDesc, srv) } @@ -7328,9 +4528,8 @@ var _Maintenance_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// AuthClient is the client API for Auth service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Auth service + type AuthClient interface { // AuthEnable enables authentication. AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) @@ -7338,7 +4537,7 @@ type AuthClient interface { AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) // Authenticate processes an authenticate request. Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) // UserGet gets detailed user information. UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) @@ -7352,7 +4551,7 @@ type AuthClient interface { UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) // UserRevokeRole revokes a role of specified user. UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) // RoleGet gets detailed role information. 
RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) @@ -7376,7 +4575,7 @@ func NewAuthClient(cc *grpc.ClientConn) AuthClient { func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { out := new(AuthEnableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7385,7 +4584,7 @@ func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { out := new(AuthDisableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7394,7 +4593,7 @@ func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, op func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { out := new(AuthenticateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7403,7 +4602,7 @@ func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { out := new(AuthUserAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7412,7 +4611,7 @@ func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts . func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { out := new(AuthUserGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7421,7 +4620,7 @@ func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts . func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { out := new(AuthUserListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7430,7 +4629,7 @@ func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { out := new(AuthUserDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -7439,7 +4638,7 @@ func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { out := new(AuthUserChangePasswordResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7448,7 +4647,7 @@ func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangeP func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { out := new(AuthUserGrantRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7457,7 +4656,7 @@ func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleReq func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { out := new(AuthUserRevokeRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7466,7 +4665,7 @@ func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleR func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { out := new(AuthRoleAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7475,7 +4674,7 @@ func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts . func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { out := new(AuthRoleGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7484,7 +4683,7 @@ func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts . func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { out := new(AuthRoleListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7493,7 +4692,7 @@ func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { out := new(AuthRoleDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -7502,7 +4701,7 @@ func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { out := new(AuthRoleGrantPermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7511,14 +4710,15 @@ func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantP func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { out := new(AuthRoleRevokePermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// AuthServer is the server API for Auth service. +// Server API for Auth service + type AuthServer interface { // AuthEnable enables authentication. AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) @@ -7526,7 +4726,7 @@ type AuthServer interface { AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) // Authenticate processes an authenticate request. Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) // UserGet gets detailed user information. UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) @@ -7540,7 +4740,7 @@ type AuthServer interface { UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) // UserRevokeRole revokes a role of specified user. UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) // RoleGet gets detailed role information. RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) @@ -7554,59 +4754,6 @@ type AuthServer interface { RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) } -// UnimplementedAuthServer can be embedded to have forward compatible implementations. 
-type UnimplementedAuthServer struct { -} - -func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented") -} -func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented") -} -func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented") -} -func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented") -} -func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented") -} -func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented") -} -func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented") -} -func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented") -} -func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented") -} -func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented") -} -func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented") -} -func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented") -} -func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented") -} -func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented") -} -func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented") -} -func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission 
not implemented") -} - func RegisterAuthServer(s *grpc.Server, srv AuthServer) { s.RegisterService(&_Auth_serviceDesc, srv) } @@ -7975,7 +5122,7 @@ var _Auth_serviceDesc = grpc.ServiceDesc{ func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7983,46 +5130,37 @@ func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { } func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x20 + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) } if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - if m.MemberId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - i-- - dAtA[i] = 0x10 - } - if m.ClusterId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - i-- - dAtA[i] = 0x8 + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) } - return len(dAtA) - i, nil + return i, nil } func (m *RangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8030,110 +5168,99 @@ func (m *RangeRequest) Marshal() (dAtA []byte, err error) { } func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.MaxCreateRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) - i-- - dAtA[i] = 0x68 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - if m.MinCreateRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) - i-- - dAtA[i] = 0x60 + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) } - if m.MaxModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) - i-- - dAtA[i] = 0x58 + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - if m.MinModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) - i-- - dAtA[i] = 0x50 + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) } - if m.CountOnly { - i-- - if m.CountOnly { + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { dAtA[i] = 1 } else { 
dAtA[i] = 0 } - i-- - dAtA[i] = 0x48 + i++ } if m.KeysOnly { - i-- + dAtA[i] = 0x40 + i++ if m.KeysOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 + i++ } - if m.Serializable { - i-- - if m.Serializable { + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x38 - } - if m.SortTarget != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) - i-- - dAtA[i] = 0x30 - } - if m.SortOrder != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) - i-- - dAtA[i] = 0x28 + i++ } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x20 + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) } - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x18 + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 + if m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *RangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8141,67 +5268,54 @@ func (m *RangeResponse) Marshal() (dAtA []byte, err error) { } func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - if m.Count != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x20 + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } if m.More { - i-- + dAtA[i] = 0x18 + i++ if m.More { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - } - if len(m.Kvs) > 0 { - for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + i++ } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) } - return len(dAtA) - i, nil + return i, nil } func (m *PutRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8209,75 +5323,64 @@ func (m *PutRequest) Marshal() (dAtA []byte, err error) { } func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.IgnoreLease { - i-- - if m.IgnoreLease { + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 + i++ } if m.IgnoreValue { - i-- + dAtA[i] = 0x28 + i++ if m.IgnoreValue { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x28 + i++ } - if m.PrevKv { - i-- - if m.PrevKv { + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - } - if m.Lease != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x18 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *PutResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8285,50 +5388,37 @@ func (m *PutResponse) Marshal() (dAtA []byte, err error) { } func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n3 } - return len(dAtA) - i, nil + return i, nil } func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8336,50 +5426,39 @@ func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { } 
func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } if m.PrevKv { - i-- + dAtA[i] = 0x18 + i++ if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8387,57 +5466,44 @@ func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { } func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PrevKvs) > 0 { - for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n4 } if m.Deleted != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8445,115 +5511,80 @@ func (m *RequestOp) Marshal() (dAtA []byte, err error) { } func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Request != nil { - { - size := m.Request.Size() - i -= size - if _, 
err := m.Request.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn5 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestRange != nil { - { - size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestPut != nil { - { - size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestDeleteRange != nil { - { - size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestTxn != nil { - { - size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8561,115 +5592,80 @@ func (m *ResponseOp) Marshal() (dAtA []byte, err error) { } func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Response != nil { - { - size := 
m.Response.Size() - i -= size - if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn10, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn10 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseRange != nil { - { - size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponsePut != nil { - { - size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseDeleteRange != nil { - { - size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseTxn != nil { - { - size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } - return len(dAtA) - i, nil + return i, nil } func (m *Compare) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8677,120 +5673,86 @@ func (m *Compare) Marshal() (dAtA []byte, err error) { } func (m *Compare) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0x82 + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) } - if m.TargetUnion != nil { - { - size := m.TargetUnion.Size() - i -= size - if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Target != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Target)) - i-- - dAtA[i] = 0x10 + if m.TargetUnion != nil { + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 } - if m.Result != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Result)) - i-- - dAtA[i] = 0x8 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Version)) - i-- + i := 0 dAtA[i] = 0x20 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil } func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) - i-- + i := 0 dAtA[i] = 0x28 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil } func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) - i-- + i := 0 dAtA[i] = 0x30 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil } func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - return len(dAtA) - i, nil + return i, nil } func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- + i := 0 dAtA[i] = 0x40 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + return i, nil } func (m *TxnRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8798,68 +5760,53 @@ func (m *TxnRequest) Marshal() (dAtA []byte, err error) { } func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Failure) > 0 { - for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n } } if len(m.Success) > 0 { - for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Success { dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Compare) > 0 { - for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *TxnResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8867,62 +5814,49 @@ func (m *TxnResponse) Marshal() (dAtA []byte, err error) { } func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Responses) > 0 { - for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n16 } if m.Succeeded { - i-- + dAtA[i] = 0x10 + i++ if m.Succeeded { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 + i++ } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, 
uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8930,41 +5864,32 @@ func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { } func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } if m.Physical { - i-- + dAtA[i] = 0x10 + i++ if m.Physical { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8972,38 +5897,27 @@ func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { } func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 } - return len(dAtA) - i, nil + return i, nil } func (m *HashRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9011,26 +5925,17 @@ func (m *HashRequest) Marshal() (dAtA []byte, err error) { } func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9038,31 +5943,22 @@ func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { } func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - return len(dAtA) - i, nil + return i, nil } func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9070,48 +5966,37 @@ func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { } func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x18 + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 } if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.CompactRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *HashResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9119,43 +6004,32 @@ func (m *HashResponse) Marshal() (dAtA []byte, err error) { } func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 } if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9163,26 +6037,17 @@ func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { } func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil 
{ - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9190,50 +6055,38 @@ func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { } func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Blob) > 0 { - i -= len(m.Blob) - copy(dAtA[i:], m.Blob) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 } if m.RemainingBytes != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9241,95 +6094,66 @@ func (m *WatchRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.RequestUnion != nil { - { - size := m.RequestUnion.Size() - i -= size - if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn21 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.CreateRequest != nil { - { - size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i 
:= 0 if m.CancelRequest != nil { - { - size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ProgressRequest != nil { - { - size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ProgressRequest.Size())) + n24, err := m.ProgressRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9337,98 +6161,86 @@ func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Fragment { - i-- - if m.Fragment { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x38 + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) } - if m.PrevKv { - i-- - if m.PrevKv { + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 + i++ } if len(m.Filters) > 0 { - dAtA22 := make([]byte, len(m.Filters)*10) - var j21 int + dAtA26 := make([]byte, len(m.Filters)*10) + var j25 int for _, num := range m.Filters { for num >= 1<<7 { - dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j21++ + j25++ } - dAtA22[j21] = uint8(num) - j21++ + dAtA26[j25] = uint8(num) + j25++ } - i -= j21 - copy(dAtA[i:], dAtA22[:j21]) - i = encodeVarintRpc(dAtA, i, uint64(j21)) - i-- dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j25)) + i += copy(dAtA[i:], dAtA26[:j25]) } - if m.ProgressNotify { - i-- - if m.ProgressNotify { + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - } - if m.StartRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) - i-- - dAtA[i] = 0x18 + i++ } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) 
- i-- - dAtA[i] = 0x12 + if m.WatchId != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.Fragment { + dAtA[i] = 0x40 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9436,31 +6248,22 @@ func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) } - return len(dAtA) - i, nil + return i, nil } func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9468,26 +6271,17 @@ func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *WatchResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9495,99 +6289,85 @@ func (m *WatchResponse) Marshal() (dAtA []byte, err error) { } func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n27 } - if m.Fragment { - i-- - if m.Fragment { + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x38 - } - if len(m.CancelReason) > 0 { - i -= len(m.CancelReason) - copy(dAtA[i:], m.CancelReason) - i = encodeVarintRpc(dAtA, i, 
uint64(len(m.CancelReason))) - i-- - dAtA[i] = 0x32 - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x28 + i++ } if m.Canceled { - i-- + dAtA[i] = 0x20 + i++ if m.Canceled { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 + i++ } - if m.Created { - i-- - if m.Created { + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if m.Fragment { + dAtA[i] = 0x38 + i++ + if m.Fragment { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 + i++ } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9595,36 +6375,27 @@ func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9632,55 +6403,43 @@ func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } - if m.ID != 0 { - 
i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9688,31 +6447,22 @@ func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9720,161 +6470,207 @@ func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { +func (m 
*LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x18 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Checkpoints) > 0 { + for _, msg := range m.Checkpoints { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.Keys { - i-- + dAtA[i] = 0x10 + i++ if m.Keys { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9882,62 +6678,50 @@ func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n32 } - if m.GrantedTTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) - i-- - dAtA[i] = 0x20 + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9945,26 +6729,17 @@ func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9972,31 
+6747,22 @@ func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { } func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10004,52 +6770,39 @@ func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 } if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Leases { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *Member) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10057,56 +6810,68 @@ func (m *Member) Marshal() (dAtA []byte, err error) { } func (m *Member) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - if len(m.ClientURLs) > 0 { - for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClientURLs[iNdEx]) - copy(dAtA[i:], m.ClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.IsLearner { + dAtA[i] = 0x28 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10114,35 +6879,42 @@ func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x10 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10150,64 +6922,49 @@ func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n34 } if m.Member != nil { - { - size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n35, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 } - if m.Header != nil { - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10215,31 +6972,22 @@ func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10247,52 +6995,39 @@ func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10300,40 +7035,37 @@ func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10341,52 +7073,39 @@ func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10394,26 +7113,17 @@ func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10421,118 +7131,148 @@ func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 
0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return len(dAtA) - i, nil + return i, nil } -func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 } - return len(dAtA) - i, nil + return i, nil } func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10540,31 +7280,22 @@ func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { } func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.TargetID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) } - return len(dAtA) - i, nil + return i, nil } func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10572,38 +7303,27 @@ func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { } func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n41, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 } - return len(dAtA) - i, nil + return i, nil } func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10611,41 +7331,32 @@ func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { } func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- - dAtA[i] = 0x18 + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) } if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) } - if m.Action != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 + if m.Alarm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) } - return len(dAtA) - i, nil + return i, nil } 
func (m *AlarmMember) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10653,36 +7364,27 @@ func (m *AlarmMember) Marshal() (dAtA []byte, err error) { } func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) } if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) } - if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10690,52 +7392,39 @@ func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { } func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 } if len(m.Alarms) > 0 { - for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Alarms { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *StatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10743,26 +7432,17 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) { } func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *StatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10770,65 +7450,88 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err 
error) { } func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x30 + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) } - if m.RaftIndex != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) - i-- - dAtA[i] = 0x28 + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) } if m.Leader != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) - i-- dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) } - if m.DbSize != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) - i-- - dAtA[i] = 0x18 + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.RaftAppliedIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + if m.DbSizeInUse != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + } + if m.IsLearner { + dAtA[i] = 0x50 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10836,26 +7539,17 @@ func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10863,26 +7557,17 @@ func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10890,40 +7575,29 @@ func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10931,40 +7605,39 @@ func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Options.Size())) + n44, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10972,33 +7645,23 @@ func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11006,33 +7669,23 @@ func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11040,40 +7693,29 @@ func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11081,40 +7723,29 @@ func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) } if len(m.Role) > 0 { - 
i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11122,40 +7753,29 @@ func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0x12 - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11163,33 +7783,23 @@ func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11197,33 +7807,23 @@ func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, 
nil + return i, nil } func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11231,26 +7831,17 @@ func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11258,26 +7849,17 @@ func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11285,33 +7867,23 @@ func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11319,45 +7891,33 @@ func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.Perm != nil { - { - size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.Perm.Size())) + n45, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11365,47 +7925,35 @@ func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0xa + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11413,38 +7961,27 @@ func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11452,38 +7989,27 @@ func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - 
if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11491,45 +8017,33 @@ func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 } if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11537,38 +8051,27 @@ func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11576,47 +8079,42 @@ func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if 
m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11624,38 +8122,27 @@ func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11663,38 +8150,27 @@ func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11702,38 +8178,27 @@ func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11741,38 +8206,27 @@ func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n54, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11780,38 +8234,27 @@ func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n55, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11819,52 +8262,39 @@ func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != 
nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 } if len(m.Perm) > 0 { - for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Perm { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11872,47 +8302,42 @@ func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n57, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11920,47 +8345,42 @@ func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n58, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 } if len(m.Users) > 0 { - for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Users[iNdEx]) - copy(dAtA[i:], m.Users[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx]))) - i-- + for _, s := range 
m.Users { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11968,38 +8388,27 @@ func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n59, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -12007,38 +8416,27 @@ func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n60, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -12046,49 +8444,33 @@ func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n61, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - offset -= sovRpc(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *ResponseHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ClusterId != 0 { @@ -12103,16 +8485,10 @@ func (m *ResponseHeader) Size() (n int) { if m.RaftTerm != 0 { n += 1 + sovRpc(uint64(m.RaftTerm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RangeRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12156,16 +8532,10 @@ func (m *RangeRequest) Size() (n int) { if m.MaxCreateRevision != 0 { n += 1 + sovRpc(uint64(m.MaxCreateRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RangeResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12184,16 +8554,10 @@ func (m *RangeResponse) Size() (n int) { if m.Count != 0 { n += 1 + sovRpc(uint64(m.Count)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *PutRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12216,16 +8580,10 @@ func (m *PutRequest) Size() (n int) { if m.IgnoreLease { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *PutResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12236,16 +8594,10 @@ func (m *PutResponse) Size() (n int) { l = m.PrevKv.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *DeleteRangeRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12259,16 +8611,10 @@ func (m *DeleteRangeRequest) Size() (n int) { if m.PrevKv { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *DeleteRangeResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12284,31 +8630,19 @@ func (m *DeleteRangeResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RequestOp) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Request != nil { n += m.Request.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RequestOp_RequestRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestRange != nil { @@ -12318,9 +8652,6 @@ func (m *RequestOp_RequestRange) Size() (n int) { return n } func (m *RequestOp_RequestPut) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestPut != nil { @@ -12330,9 +8661,6 @@ func (m *RequestOp_RequestPut) Size() (n int) { return n } func (m *RequestOp_RequestDeleteRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestDeleteRange != nil { @@ -12342,9 +8670,6 @@ func (m *RequestOp_RequestDeleteRange) Size() (n int) { return n } func (m *RequestOp_RequestTxn) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestTxn 
!= nil { @@ -12354,24 +8679,15 @@ func (m *RequestOp_RequestTxn) Size() (n int) { return n } func (m *ResponseOp) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Response != nil { n += m.Response.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ResponseOp_ResponseRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseRange != nil { @@ -12381,9 +8697,6 @@ func (m *ResponseOp_ResponseRange) Size() (n int) { return n } func (m *ResponseOp_ResponsePut) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponsePut != nil { @@ -12393,9 +8706,6 @@ func (m *ResponseOp_ResponsePut) Size() (n int) { return n } func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseDeleteRange != nil { @@ -12405,9 +8715,6 @@ func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { return n } func (m *ResponseOp_ResponseTxn) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseTxn != nil { @@ -12417,9 +8724,6 @@ func (m *ResponseOp_ResponseTxn) Size() (n int) { return n } func (m *Compare) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Result != 0 { @@ -12439,43 +8743,28 @@ func (m *Compare) Size() (n int) { if l > 0 { n += 2 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Compare_Version) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.Version)) return n } func (m *Compare_CreateRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.CreateRevision)) return n } func (m *Compare_ModRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.ModRevision)) return n } func (m *Compare_Value) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -12485,18 +8774,12 @@ func (m *Compare_Value) Size() (n int) { return n } func (m *Compare_Lease) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.Lease)) return n } func (m *TxnRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Compare) > 0 { @@ -12517,16 +8800,10 @@ func (m *TxnRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *TxnResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12542,16 +8819,10 @@ func (m *TxnResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *CompactionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Revision != 0 { @@ -12560,59 +8831,35 @@ func (m *CompactionRequest) Size() (n int) { if m.Physical { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *CompactionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashKVRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if 
m.Revision != 0 { n += 1 + sovRpc(uint64(m.Revision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashKVResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12625,16 +8872,10 @@ func (m *HashKVResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12644,28 +8885,16 @@ func (m *HashResponse) Size() (n int) { if m.Hash != 0 { n += 1 + sovRpc(uint64(m.Hash)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *SnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *SnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12679,31 +8908,19 @@ func (m *SnapshotResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestUnion != nil { n += m.RequestUnion.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchRequest_CreateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CreateRequest != nil { @@ -12713,9 +8930,6 @@ func (m *WatchRequest_CreateRequest) Size() (n int) { return n } func (m *WatchRequest_CancelRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CancelRequest != nil { @@ -12725,9 +8939,6 @@ func (m *WatchRequest_CancelRequest) Size() (n int) { return n } func (m *WatchRequest_ProgressRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ProgressRequest != nil { @@ -12737,9 +8948,6 @@ func (m *WatchRequest_ProgressRequest) Size() (n int) { return n } func (m *WatchCreateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12772,43 +8980,25 @@ func (m *WatchCreateRequest) Size() (n int) { if m.Fragment { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchCancelRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.WatchId != 0 { n += 1 + sovRpc(uint64(m.WatchId)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchProgressRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12840,16 +9030,10 @@ func (m *WatchResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseGrantRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.TTL != 0 { @@ -12858,16 +9042,10 @@ func (m *LeaseGrantRequest) Size() (n int) { if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseGrantResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12884,62 +9062,72 @@ func (m 
*LeaseGrantResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseRevokeRequest) Size() (n int) { - if m == nil { - return 0 + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) } + return n +} + +func (m *LeaseCheckpoint) Size() (n int) { var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Remaining_TTL != 0 { + n += 1 + sovRpc(uint64(m.Remaining_TTL)) } return n } -func (m *LeaseRevokeResponse) Size() (n int) { - if m == nil { - return 0 +func (m *LeaseCheckpointRequest) Size() (n int) { + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, e := range m.Checkpoints { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } } + return n +} + +func (m *LeaseCheckpointResponse) Size() (n int) { var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseKeepAliveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseKeepAliveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12952,16 +9140,10 @@ func (m *LeaseKeepAliveResponse) Size() (n int) { if m.TTL != 0 { n += 1 + sovRpc(uint64(m.TTL)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseTimeToLiveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -12970,16 +9152,10 @@ func (m *LeaseTimeToLiveRequest) Size() (n int) { if m.Keys { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseTimeToLiveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13001,43 +9177,25 @@ func (m *LeaseTimeToLiveResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13050,16 +9208,10 @@ func (m *LeaseLeasesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Member) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -13081,16 +9233,13 @@ func (m *Member) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.IsLearner { + n += 2 } return n } func (m *MemberAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.PeerURLs) > 0 { @@ -13099,16 +9248,13 @@ func (m *MemberAddRequest) Size() (n int) { n += 
1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.IsLearner { + n += 2 } return n } func (m *MemberAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13125,31 +9271,19 @@ func (m *MemberAddResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberRemoveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberRemoveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13162,16 +9296,10 @@ func (m *MemberRemoveResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberUpdateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -13183,16 +9311,10 @@ func (m *MemberUpdateRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberUpdateResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13205,28 +9327,16 @@ func (m *MemberUpdateResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13239,75 +9349,70 @@ func (m *MemberListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } -func (m *DefragmentRequest) Size() (n int) { - if m == nil { - return 0 +func (m *MemberPromoteRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) } + return n +} + +func (m *MemberPromoteResponse) Size() (n int) { var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } } return n } +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + func (m *DefragmentResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MoveLeaderRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.TargetID != 0 { n += 1 + sovRpc(uint64(m.TargetID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MoveLeaderResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Action != 0 { @@ -13319,16 +9424,10 @@ func (m *AlarmRequest) Size() (n int) { if m.Alarm 
!= 0 { n += 1 + sovRpc(uint64(m.Alarm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmMember) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MemberID != 0 { @@ -13337,16 +9436,10 @@ func (m *AlarmMember) Size() (n int) { if m.Alarm != 0 { n += 1 + sovRpc(uint64(m.Alarm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13359,28 +9452,16 @@ func (m *AlarmResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13403,40 +9484,37 @@ func (m *StatusResponse) Size() (n int) { if m.RaftTerm != 0 { n += 1 + sovRpc(uint64(m.RaftTerm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.RaftAppliedIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } + if m.IsLearner { + n += 2 } return n } func (m *AuthEnableRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthDisableRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13447,16 +9525,10 @@ func (m *AuthenticateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13467,48 +9539,34 @@ func (m *AuthUserAddRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovRpc(uint64(l)) } return n } func (m *AuthUserGetRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserChangePasswordRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13519,16 +9577,10 @@ func (m *AuthUserChangePasswordRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGrantRoleRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.User) @@ -13539,16 +9591,10 @@ func (m *AuthUserGrantRoleRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserRevokeRoleRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13559,88 +9605,52 @@ func (m *AuthUserRevokeRoleRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGetRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13651,16 +9661,10 @@ func (m *AuthRoleGrantPermissionRequest) Size() (n int) { l = m.Perm.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) @@ -13675,48 +9679,30 @@ func (m *AuthRoleRevokePermissionRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthEnableResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthDisableResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthenticateResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13727,32 +9713,20 @@ func (m *AuthenticateResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGetResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13765,96 +9739,60 @@ func (m *AuthUserGetResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if 
m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserChangePasswordResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGrantRoleResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserRevokeRoleResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGetResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13867,16 +9805,10 @@ func (m *AuthRoleGetResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13889,16 +9821,10 @@ func (m *AuthRoleListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13911,63 +9837,49 @@ func (m *AuthUserListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovRpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} func sozRpc(x uint64) (n int) { return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -13986,7 +9898,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14014,7 +9926,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClusterId |= uint64(b&0x7F) << shift + m.ClusterId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14033,7 +9945,7 @@ func (m *ResponseHeader) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberId |= uint64(b&0x7F) << shift + m.MemberId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14052,7 +9964,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14071,7 +9983,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift + m.RaftTerm |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14085,13 +9997,9 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14116,7 +10024,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14144,7 +10052,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14153,9 +10061,6 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14178,7 +10083,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14187,9 +10092,6 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14212,7 +10114,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= int64(b&0x7F) << shift + m.Limit |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14231,7 +10133,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14250,7 +10152,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift if b < 0x80 { break } @@ -14269,7 +10171,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift if b < 0x80 { break } @@ -14288,7 +10190,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14308,7 +10210,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14328,7 +10230,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14348,7 +10250,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - m.MinModRevision |= int64(b&0x7F) << shift + m.MinModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14367,7 +10269,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxModRevision |= int64(b&0x7F) << shift + m.MaxModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14386,7 +10288,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinCreateRevision |= int64(b&0x7F) << shift + m.MinCreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14405,7 +10307,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxCreateRevision |= int64(b&0x7F) << shift + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14419,13 +10321,9 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14450,7 +10348,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14478,7 +10376,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14487,9 +10385,6 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14514,7 +10409,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14523,9 +10418,6 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14548,7 +10440,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14568,7 +10460,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int64(b&0x7F) << shift + m.Count |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14582,13 +10474,9 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14613,7 +10501,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14641,7 +10529,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14650,9 +10538,6 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14675,7 +10560,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14684,9 +10569,6 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14709,7 +10591,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14728,7 +10610,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14748,7 +10630,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14768,7 +10650,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14783,13 +10665,9 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14814,7 +10692,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14842,7 +10720,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14851,9 +10729,6 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14878,7 +10753,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14887,9 +10762,6 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14909,13 +10781,9 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14940,7 +10808,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14968,7 +10836,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14977,9 +10845,6 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15002,7 +10867,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15011,9 +10876,6 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15036,7 +10898,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15051,13 +10913,9 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15082,7 +10940,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15110,7 +10968,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15119,9 +10977,6 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15146,7 +11001,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Deleted |= int64(b&0x7F) << shift + m.Deleted |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15165,7 +11020,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15174,9 +11029,6 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15194,13 +11046,9 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15225,7 +11073,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15253,7 +11101,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15262,9 +11110,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15288,7 +11133,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15297,9 +11142,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15323,7 +11165,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15332,9 +11174,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15358,7 +11197,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15367,9 +11206,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex 
> l { return io.ErrUnexpectedEOF } @@ -15388,13 +11224,9 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15419,7 +11251,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15447,7 +11279,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15456,9 +11288,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15482,7 +11311,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15491,9 +11320,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15517,7 +11343,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15526,9 +11352,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15552,7 +11375,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15561,9 +11384,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15582,13 +11402,9 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15613,7 +11429,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15641,7 +11457,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= Compare_CompareResult(b&0x7F) << shift + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift if b < 0x80 { break } @@ -15660,7 +11476,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Target |= Compare_CompareTarget(b&0x7F) << shift + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift if b < 0x80 { break } @@ -15679,7 +11495,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15688,9 +11504,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15713,7 +11526,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15733,7 +11546,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15753,7 +11566,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15773,7 +11586,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15782,9 +11595,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15806,7 +11616,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15826,7 +11636,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15835,9 +11645,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15855,13 +11662,9 @@ func (m *Compare) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15886,7 +11689,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15914,7 +11717,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15923,9 +11726,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15948,7 +11748,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15957,9 +11757,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15982,7 +11779,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15991,9 +11788,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16011,13 +11805,9 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16042,7 +11832,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16070,7 +11860,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16079,9 +11869,6 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16106,7 +11893,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16126,7 +11913,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16135,9 +11922,6 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16155,13 +11939,9 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16186,7 +11966,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16214,7 +11994,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16233,7 +12013,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16248,13 +12028,9 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16279,7 +12055,7 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16307,7 +12083,7 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16316,9 +12092,6 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16338,13 +12111,9 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16369,7 +12138,7 @@ func (m *HashRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16392,13 +12161,9 @@ func (m *HashRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16423,7 +12188,7 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16451,7 +12216,7 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16465,13 +12230,9 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16496,7 +12257,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16524,7 +12285,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16533,9 +12294,6 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16560,7 +12318,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Hash |= uint32(b&0x7F) << shift + m.Hash |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -16579,7 +12337,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift + m.CompactRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16593,13 +12351,9 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16624,7 +12378,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16652,7 +12406,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16661,9 +12415,6 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16688,7 +12439,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Hash |= uint32(b&0x7F) << shift + m.Hash |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -16702,13 +12453,9 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16733,7 +12480,7 @@ func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16756,13 +12503,9 @@ func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16787,7 +12530,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16815,7 +12558,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16824,9 +12567,6 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16851,7 +12591,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RemainingBytes |= uint64(b&0x7F) << shift + m.RemainingBytes |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16870,7 +12610,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16879,9 +12619,6 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16899,13 +12636,9 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16930,7 +12663,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16958,7 +12691,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16967,9 +12700,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16993,7 +12723,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17002,9 +12732,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17028,7 +12755,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17037,9 +12764,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17058,13 +12782,9 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -17089,7 +12809,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17117,7 +12837,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17126,9 +12846,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17151,7 +12868,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17160,9 +12877,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17185,7 +12899,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartRevision |= int64(b&0x7F) << shift + m.StartRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17204,7 +12918,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17222,7 +12936,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift if b < 0x80 { break } @@ -17239,7 +12953,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= int(b&0x7F) << shift + packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17248,16 +12962,9 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } - var elementCount int - if elementCount != 0 && len(m.Filters) == 0 { - m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount) - } for iNdEx < postIndex { var v WatchCreateRequest_FilterType for shift := uint(0); ; shift += 7 { @@ -17269,7 +12976,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift if b < 0x80 { break } @@ -17293,7 +13000,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17313,7 +13020,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17332,7 +13039,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17347,13 +13054,9 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17378,7 +13081,7 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17406,7 +13109,7 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17420,13 +13123,9 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17451,7 +13150,7 @@ func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17474,13 +13173,9 @@ func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17505,7 +13200,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17533,7 +13228,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17542,9 +13237,6 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17569,7 +13261,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17578,7 +13270,284 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) } - var v int + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17588,17 +13557,30 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Created = bool(v != 0) - case 4: + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var v int + m.ID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17608,17 +13590,16 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Canceled = bool(v != 0) - case 5: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) } - m.CompactRevision = 0 + m.TTL = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17628,14 +13609,14 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17647,7 +13628,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17657,67 +13638,10 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CancelReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) - } - var v int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fragment = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &mvccpb.Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17728,13 +13652,9 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17744,7 +13664,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17759,7 +13679,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17767,32 +13687,13 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } @@ -17806,7 +13707,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17820,13 +13721,9 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -17836,7 +13733,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17851,7 +13748,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17859,10 +13756,10 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -17879,7 +13776,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17888,9 +13785,6 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17901,7 +13795,57 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } @@ -17915,35 +13859,16 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 3: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) } - var stringLen uint64 + m.Remaining_TTL = 0 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17953,24 +13878,11 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Remaining_TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -17980,13 +13892,9 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17996,7 +13904,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18011,7 +13919,7 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18019,17 +13927,17 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) } - m.ID = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -18039,11 +13947,23 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) + if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -18053,13 +13973,9 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18069,7 +13985,7 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18084,7 +14000,7 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18092,10 +14008,10 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -18112,7 +14028,7 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18121,9 +14037,6 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18143,13 +14056,9 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18174,7 +14083,7 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18202,7 +14111,7 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18216,13 +14125,9 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18247,7 +14152,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18275,7 +14180,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18284,9 +14189,6 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18311,7 +14213,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18330,7 +14232,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TTL |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18344,13 +14246,9 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18375,7 +14273,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18403,7 +14301,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18422,7 +14320,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18437,13 +14335,9 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18468,7 +14362,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18496,7 +14390,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18505,9 +14399,6 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18532,7 +14423,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18551,7 +14442,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TTL |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18570,7 +14461,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GrantedTTL |= int64(b&0x7F) << shift + m.GrantedTTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18589,7 +14480,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18598,9 +14489,6 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18616,13 +14504,9 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18647,7 +14531,7 @@ func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18670,13 +14554,9 @@ func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18701,7 +14581,7 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18729,7 +14609,7 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18743,13 +14623,9 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18774,7 +14650,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18802,7 +14678,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18811,9 +14687,6 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18838,7 +14711,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18847,9 +14720,6 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18867,13 +14737,9 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18898,7 +14764,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18926,7 +14792,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18945,7 +14811,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18955,9 +14821,6 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18977,7 +14840,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18987,9 +14850,6 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19009,7 +14869,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19019,14 +14879,31 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19036,13 +14913,9 @@ func (m *Member) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -19067,7 +14940,7 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19095,7 +14968,7 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19105,14 +14978,31 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19122,13 +15012,9 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19153,7 +15039,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19181,7 +15067,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19190,9 +15076,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19217,7 +15100,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19226,9 +15109,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19253,7 +15133,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19262,9 +15142,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19282,13 +15159,192 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthRpc } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19298,7 +15354,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19313,7 +15369,7 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19321,10 +15377,10 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19341,11 +15397,40 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19355,13 +15440,9 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19371,7 +15452,7 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19386,7 +15467,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19394,10 +15475,10 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19414,7 +15495,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19423,9 +15504,6 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19450,7 +15528,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19459,9 +15537,6 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19479,13 +15554,9 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19495,7 +15566,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19510,7 +15581,7 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19518,63 +15589,12 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19584,13 +15604,9 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19600,7 +15616,7 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19615,7 +15631,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19623,10 +15639,10 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19643,7 +15659,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19652,9 +15668,6 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19679,7 +15692,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19688,9 +15701,6 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19708,13 +15718,9 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19724,7 +15730,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19739,7 +15745,7 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19747,12 +15753,31 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19762,13 +15787,9 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19778,7 +15799,7 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19793,7 +15814,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19801,10 +15822,10 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19821,7 +15842,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19830,9 +15851,6 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19857,7 +15875,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19866,9 +15884,6 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19886,13 +15901,9 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -19917,7 +15928,7 @@ func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19940,13 +15951,9 @@ func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19971,7 +15978,7 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19999,7 +16006,7 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20008,9 +16015,6 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20030,13 +16034,9 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20061,7 +16061,7 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20089,7 +16089,7 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TargetID |= uint64(b&0x7F) << shift + m.TargetID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20103,13 +16103,9 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20134,7 +16130,7 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20162,7 +16158,7 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20171,9 +16167,6 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20193,13 +16186,9 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20224,7 +16213,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20252,7 +16241,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift if b < 0x80 { break } @@ -20271,7 +16260,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift + m.MemberID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20290,7 +16279,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift + m.Alarm |= (AlarmType(b) & 0x7F) << shift if b < 0x80 { break } @@ -20304,13 +16293,9 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20335,7 +16320,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20363,7 +16348,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift + m.MemberID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20382,7 +16367,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift + m.Alarm |= (AlarmType(b) & 0x7F) << shift if b < 0x80 { break } @@ -20396,13 +16381,9 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20427,7 +16408,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20455,7 +16436,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20464,9 +16445,6 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20491,7 +16469,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20500,9 +16478,6 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20520,13 +16495,9 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20551,7 +16522,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20574,13 +16545,9 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20605,7 +16572,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20633,7 +16600,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20642,9 +16609,6 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20669,7 +16633,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20679,9 +16643,6 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20701,7 +16662,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DbSize |= int64(b&0x7F) << shift + m.DbSize |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20710,7 +16671,83 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) } - m.Leader = 0 + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) + } + m.RaftAppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftAppliedIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20720,16 +16757,26 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Leader |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) } - m.RaftIndex = 0 + m.DbSizeInUse = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20739,16 +16786,16 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftIndex |= uint64(b&0x7F) << shift + m.DbSizeInUse |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: + case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) } - m.RaftTerm = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20758,11 +16805,12 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -20772,13 +16820,9 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20803,7 +16847,7 @@ func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20826,13 +16870,9 @@ func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20857,7 +16897,7 @@ func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20880,13 +16920,9 @@ func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20911,7 +16947,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20939,7 +16975,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20949,9 +16985,6 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20971,7 +17004,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20981,9 +17014,6 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20998,13 +17028,9 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21029,7 +17055,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21057,7 +17083,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21067,9 +17093,6 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21089,7 +17112,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21099,13 +17122,43 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Password = string(dAtA[iNdEx:postIndex]) + if m.Options == nil { + m.Options = &authpb.UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -21116,13 +17169,9 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx 
+ skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21147,7 +17196,7 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21175,7 +17224,7 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21185,9 +17234,6 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21202,13 +17248,9 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21233,7 +17275,7 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21261,7 +17303,7 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21271,9 +17313,6 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21288,13 +17327,9 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21319,7 +17354,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21347,7 +17382,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21357,9 +17392,6 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21379,7 +17411,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21389,9 +17421,6 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21406,13 +17435,9 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21437,7 +17462,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21465,7 +17490,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21475,9 +17500,6 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21497,7 +17519,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21507,9 +17529,6 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21524,13 +17543,9 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21555,7 +17570,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21583,7 +17598,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21593,9 +17608,6 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21615,7 +17627,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21625,9 +17637,6 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21642,13 +17651,9 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21673,7 +17678,7 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21701,7 +17706,7 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21711,9 +17716,6 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21728,13 +17730,9 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21759,7 +17757,7 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21787,7 +17785,7 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21797,9 +17795,6 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21814,13 +17809,9 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21845,7 +17836,7 @@ func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21868,13 +17859,9 @@ func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21899,7 +17886,7 @@ func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21922,13 +17909,9 @@ func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21953,7 +17936,7 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21981,7 +17964,7 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21991,9 +17974,6 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22008,13 +17988,9 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -22039,7 +18015,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22067,7 +18043,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22077,9 +18053,6 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22099,7 +18072,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22108,9 +18081,6 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22130,13 +18100,9 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22161,7 +18127,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22189,7 +18155,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22199,9 +18165,6 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22211,7 +18174,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -22221,29 +18184,28 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -22253,23 +18215,22 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.RangeEnd = string(dAtA[iNdEx:postIndex]) + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -22280,13 +18241,9 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22311,7 +18268,7 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22339,7 +18296,7 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22348,9 +18305,6 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22370,13 +18324,9 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22401,7 +18351,7 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22429,7 +18379,7 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22438,9 +18388,6 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22460,13 +18407,9 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -22491,7 +18434,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22519,7 +18462,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22528,9 +18471,6 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22555,7 +18495,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22565,9 +18505,6 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22582,13 +18519,9 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22613,7 +18546,7 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22641,7 +18574,7 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22650,9 +18583,6 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22672,13 +18602,9 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -22703,7 +18629,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22731,7 +18657,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22740,9 +18666,6 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22767,7 +18690,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22777,9 +18700,6 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22794,13 +18714,9 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22825,7 +18741,7 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22853,7 +18769,7 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22862,9 +18778,6 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22884,13 +18797,9 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22915,7 +18824,7 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22943,7 +18852,7 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22952,9 +18861,6 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22974,13 +18880,9 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23005,7 +18907,7 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23033,7 +18935,7 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23042,9 +18944,6 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23064,13 +18963,9 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23095,7 +18990,7 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23123,7 +19018,7 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23132,9 +19027,6 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23154,13 +19046,9 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23185,7 +19073,7 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23213,7 +19101,7 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23222,9 +19110,6 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23244,13 +19129,9 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23275,7 +19156,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23303,7 +19184,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23312,9 +19193,6 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23339,7 +19217,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23348,9 +19226,6 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23368,13 +19243,9 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23399,7 +19270,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23427,7 +19298,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23436,9 +19307,6 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23463,7 +19331,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23473,9 +19341,6 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23490,13 +19355,9 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23521,7 +19382,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23549,7 +19410,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23558,9 +19419,6 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23585,7 +19443,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23595,9 +19453,6 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23612,13 +19467,9 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23643,7 +19494,7 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23671,7 +19522,7 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23680,9 +19531,6 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23702,13 +19550,9 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23733,7 +19577,7 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23761,7 +19605,7 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23770,9 +19614,6 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23792,13 +19633,9 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23823,7 +19660,7 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23851,7 +19688,7 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23860,9 +19697,6 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23882,13 +19716,9 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23952,11 +19782,8 @@ func skipRpc(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRpc - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRpc } return iNdEx, nil @@ -23987,9 +19814,6 @@ func skipRpc(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRpc - } } return iNdEx, nil case 4: @@ -24008,3 +19832,255 @@ var ( ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } + +var fileDescriptorRpc = []byte{ + // 3928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x23, 0xc9, + 0x75, 0x56, 0x93, 0xe2, 0xed, 0xf0, 0x22, 0xaa, 0x74, 0x19, 0x0e, 0x67, 0x46, 0xa3, 0xad, 0xd9, + 0xd9, 0xd5, 0xce, 0xec, 0x8a, 0x6b, 0xd9, 0x4e, 0x80, 0x49, 0xe2, 0x58, 0x23, 0x71, 0x67, 0xb4, + 0xd2, 0x88, 0xda, 0x16, 0x67, 0xf6, 0x02, 0x23, 0x42, 0x8b, 0x2c, 0x49, 0x1d, 0x91, 0xdd, 0x74, + 0x77, 0x93, 0x23, 0x6d, 0x2e, 0x0e, 0x0c, 0xc7, 0x40, 0xf2, 0x68, 0x03, 0x41, 0xf2, 0x90, 0xa7, + 0x20, 0x08, 0xfc, 0x90, 0xe7, 0x00, 0xf9, 0x05, 0x79, 0xca, 0x05, 0xf9, 0x03, 0xc1, 0xc6, 0x2f, + 0xc9, 0xaf, 0x30, 0xea, 0xd6, 0x5d, 0x7d, 0xa3, 0xc6, 0xa6, 0x77, 0x5f, 0xa4, 0xae, 0x53, 0xa7, + 0xce, 0x39, 0x75, 0xaa, 0xea, 0x9c, 0xd3, 0x5f, 0x17, 0xa1, 0xe4, 0x8c, 0x7a, 0x9b, 0x23, 0xc7, + 0xf6, 0x6c, 0x54, 0x21, 0x5e, 0xaf, 0xef, 0x12, 0x67, 0x42, 0x9c, 0xd1, 0x69, 0x73, 0xf9, 0xdc, + 0x3e, 0xb7, 0x59, 0x47, 0x8b, 0x3e, 0x71, 0x9e, 0xe6, 0x6d, 0xca, 0xd3, 0x1a, 0x4e, 0x7a, 0x3d, + 0xf6, 0x67, 0x74, 0xda, 0xba, 0x9c, 0x88, 0xae, 0x3b, 0xac, 0xcb, 0x18, 0x7b, 0x17, 0xec, 0xcf, + 0xe8, 0x94, 0xfd, 0x13, 0x9d, 0x77, 0xcf, 0x6d, 0xfb, 0x7c, 0x40, 0x5a, 0xc6, 0xc8, 0x6c, 0x19, + 0x96, 0x65, 0x7b, 0x86, 0x67, 0xda, 0x96, 0xcb, 0x7b, 0xf1, 0x5f, 0x6a, 0x50, 0xd3, 0x89, 0x3b, + 0xb2, 0x2d, 0x97, 0x3c, 0x27, 0x46, 0x9f, 0x38, 0xe8, 0x1e, 0x40, 0x6f, 0x30, 0x76, 0x3d, 0xe2, + 0x9c, 0x98, 0xfd, 0x86, 0xb6, 0xae, 0x6d, 0xcc, 0xeb, 0x25, 0x41, 0xd9, 0xeb, 0xa3, 0x3b, 0x50, + 0x1a, 0x92, 0xe1, 0x29, 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf5, 0x51, 0x13, 0x8a, 0x0e, + 0x99, 0x98, 0xae, 0x69, 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, + 0x67, 0xde, 0x89, 0x47, 0x9c, 0x61, 0x63, 0x9e, 0x0f, 
0xa4, 0x84, 0x2e, 0x71, 0x86, 0xf8, 0x27, + 0x39, 0xa8, 0xe8, 0x86, 0x75, 0x4e, 0x74, 0xf2, 0xc3, 0x31, 0x71, 0x3d, 0x54, 0x87, 0xec, 0x25, + 0xb9, 0x66, 0xea, 0x2b, 0x3a, 0x7d, 0xe4, 0xe3, 0xad, 0x73, 0x72, 0x42, 0x2c, 0xae, 0xb8, 0x42, + 0xc7, 0x5b, 0xe7, 0xa4, 0x6d, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x84, 0x56, 0xde, + 0x08, 0x99, 0x33, 0x1f, 0x31, 0x67, 0x07, 0xc0, 0xb5, 0x1d, 0xef, 0xc4, 0x76, 0xfa, 0xc4, 0x69, + 0xe4, 0xd6, 0xb5, 0x8d, 0xda, 0xd6, 0xdb, 0x9b, 0xea, 0x42, 0x6c, 0xaa, 0x06, 0x6d, 0x1e, 0xdb, + 0x8e, 0xd7, 0xa1, 0xbc, 0x7a, 0xc9, 0x95, 0x8f, 0xe8, 0x23, 0x28, 0x33, 0x21, 0x9e, 0xe1, 0x9c, + 0x13, 0xaf, 0x91, 0x67, 0x52, 0x1e, 0xde, 0x20, 0xa5, 0xcb, 0x98, 0x75, 0xa6, 0x9e, 0x3f, 0x23, + 0x0c, 0x15, 0x97, 0x38, 0xa6, 0x31, 0x30, 0xbf, 0x34, 0x4e, 0x07, 0xa4, 0x51, 0x58, 0xd7, 0x36, + 0x8a, 0x7a, 0x88, 0x46, 0xe7, 0x7f, 0x49, 0xae, 0xdd, 0x13, 0xdb, 0x1a, 0x5c, 0x37, 0x8a, 0x8c, + 0xa1, 0x48, 0x09, 0x1d, 0x6b, 0x70, 0xcd, 0x16, 0xcd, 0x1e, 0x5b, 0x1e, 0xef, 0x2d, 0xb1, 0xde, + 0x12, 0xa3, 0xb0, 0xee, 0x0d, 0xa8, 0x0f, 0x4d, 0xeb, 0x64, 0x68, 0xf7, 0x4f, 0x7c, 0x87, 0x00, + 0x73, 0x48, 0x6d, 0x68, 0x5a, 0x2f, 0xec, 0xbe, 0x2e, 0xdd, 0x42, 0x39, 0x8d, 0xab, 0x30, 0x67, + 0x59, 0x70, 0x1a, 0x57, 0x2a, 0xe7, 0x26, 0x2c, 0x51, 0x99, 0x3d, 0x87, 0x18, 0x1e, 0x09, 0x98, + 0x2b, 0x8c, 0x79, 0x71, 0x68, 0x5a, 0x3b, 0xac, 0x27, 0xc4, 0x6f, 0x5c, 0xc5, 0xf8, 0xab, 0x82, + 0xdf, 0xb8, 0x0a, 0xf3, 0xe3, 0x4d, 0x28, 0xf9, 0x3e, 0x47, 0x45, 0x98, 0x3f, 0xec, 0x1c, 0xb6, + 0xeb, 0x73, 0x08, 0x20, 0xbf, 0x7d, 0xbc, 0xd3, 0x3e, 0xdc, 0xad, 0x6b, 0xa8, 0x0c, 0x85, 0xdd, + 0x36, 0x6f, 0x64, 0xf0, 0x53, 0x80, 0xc0, 0xbb, 0xa8, 0x00, 0xd9, 0xfd, 0xf6, 0xe7, 0xf5, 0x39, + 0xca, 0xf3, 0xaa, 0xad, 0x1f, 0xef, 0x75, 0x0e, 0xeb, 0x1a, 0x1d, 0xbc, 0xa3, 0xb7, 0xb7, 0xbb, + 0xed, 0x7a, 0x86, 0x72, 0xbc, 0xe8, 0xec, 0xd6, 0xb3, 0xa8, 0x04, 0xb9, 0x57, 0xdb, 0x07, 0x2f, + 0xdb, 0xf5, 0x79, 0xfc, 0x73, 0x0d, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xfc, 0x05, + 0x3b, 0x17, 0x6c, 0x2b, 0x96, 0xb7, 0xee, 0x46, 0x16, 0x37, 0x74, 0x76, 0x74, 0xc1, 0x8b, 0x30, + 0x64, 0x2f, 0x27, 0x6e, 0x23, 0xb3, 0x9e, 0xdd, 0x28, 0x6f, 0xd5, 0x37, 0xf9, 0x81, 0xdd, 0xdc, + 0x27, 0xd7, 0xaf, 0x8c, 0xc1, 0x98, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0x1f, 0xda, 0x0e, 0x61, 0x3b, + 0xb6, 0xa8, 0xb3, 0x67, 0xba, 0x8d, 0xd9, 0xa2, 0x89, 0xdd, 0xca, 0x1b, 0xf8, 0x17, 0x1a, 0xc0, + 0xd1, 0xd8, 0x4b, 0x3f, 0x1a, 0xcb, 0x90, 0x9b, 0x50, 0xc1, 0xe2, 0x58, 0xf0, 0x06, 0x3b, 0x13, + 0xc4, 0x70, 0x89, 0x7f, 0x26, 0x68, 0x03, 0xdd, 0x82, 0xc2, 0xc8, 0x21, 0x93, 0x93, 0xcb, 0x09, + 0x53, 0x52, 0xd4, 0xf3, 0xb4, 0xb9, 0x3f, 0x41, 0x6f, 0x41, 0xc5, 0x3c, 0xb7, 0x6c, 0x87, 0x9c, + 0x70, 0x59, 0x39, 0xd6, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x03, + 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x33, 0xb9, 0xef, 0xbd, 0xc0, 0xc6, 0x0c, 0x1b, 0x16, 0x77, + 0xa1, 0xb0, 0x1a, 0xff, 0x00, 0xd0, 0x2e, 0x19, 0x10, 0x8f, 0xcc, 0x12, 0x3d, 0x14, 0x9f, 0x64, + 0x55, 0x9f, 0xe0, 0x9f, 0x69, 0xb0, 0x14, 0x12, 0x3f, 0xd3, 0xb4, 0x1a, 0x50, 0xe8, 0x33, 0x61, + 0xdc, 0x82, 0xac, 0x2e, 0x9b, 0xe8, 0x31, 0x14, 0x85, 0x01, 0x6e, 0x23, 0x9b, 0xb2, 0x69, 0x0a, + 0xdc, 0x26, 0x17, 0xff, 0x22, 0x03, 0x25, 0x31, 0xd1, 0xce, 0x08, 0x6d, 0x43, 0xd5, 0xe1, 0x8d, + 0x13, 0x36, 0x1f, 0x61, 0x51, 0x33, 0x3d, 0x08, 0x3d, 0x9f, 0xd3, 0x2b, 0x62, 0x08, 0x23, 0xa3, + 0xdf, 0x83, 0xb2, 0x14, 0x31, 0x1a, 0x7b, 0xc2, 0xe5, 0x8d, 0xb0, 0x80, 0x60, 0xff, 0x3d, 0x9f, + 0xd3, 0x41, 0xb0, 0x1f, 0x8d, 0x3d, 0xd4, 0x85, 0x65, 0x39, 0x98, 0xcf, 0x46, 
0x98, 0x91, 0x65, + 0x52, 0xd6, 0xc3, 0x52, 0xe2, 0x4b, 0xf5, 0x7c, 0x4e, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, + 0x5d, 0xf1, 0xe0, 0x1d, 0x33, 0xa9, 0x7b, 0x65, 0xc5, 0x4d, 0xea, 0x5e, 0x59, 0x4f, 0x4b, 0x50, + 0x10, 0x2d, 0xfc, 0x2f, 0x19, 0x00, 0xb9, 0x1a, 0x9d, 0x11, 0xda, 0x85, 0x9a, 0x23, 0x5a, 0x21, + 0x6f, 0xdd, 0x49, 0xf4, 0x96, 0x58, 0xc4, 0x39, 0xbd, 0x2a, 0x07, 0x71, 0xe3, 0xbe, 0x07, 0x15, + 0x5f, 0x4a, 0xe0, 0xb0, 0xdb, 0x09, 0x0e, 0xf3, 0x25, 0x94, 0xe5, 0x00, 0xea, 0xb2, 0x4f, 0x61, + 0xc5, 0x1f, 0x9f, 0xe0, 0xb3, 0xb7, 0xa6, 0xf8, 0xcc, 0x17, 0xb8, 0x24, 0x25, 0xa8, 0x5e, 0x53, + 0x0d, 0x0b, 0xdc, 0x76, 0x3b, 0xc1, 0x6d, 0x71, 0xc3, 0xa8, 0xe3, 0x80, 0xe6, 0x4b, 0xde, 0xc4, + 0xff, 0x97, 0x85, 0xc2, 0x8e, 0x3d, 0x1c, 0x19, 0x0e, 0x5d, 0x8d, 0xbc, 0x43, 0xdc, 0xf1, 0xc0, + 0x63, 0xee, 0xaa, 0x6d, 0x3d, 0x08, 0x4b, 0x14, 0x6c, 0xf2, 0xbf, 0xce, 0x58, 0x75, 0x31, 0x84, + 0x0e, 0x16, 0xe9, 0x31, 0xf3, 0x06, 0x83, 0x45, 0x72, 0x14, 0x43, 0xe4, 0x41, 0xce, 0x06, 0x07, + 0xb9, 0x09, 0x85, 0x09, 0x71, 0x82, 0x94, 0xfe, 0x7c, 0x4e, 0x97, 0x04, 0xf4, 0x1e, 0x2c, 0x44, + 0xd3, 0x4b, 0x4e, 0xf0, 0xd4, 0x7a, 0xe1, 0x6c, 0xf4, 0x00, 0x2a, 0xa1, 0x1c, 0x97, 0x17, 0x7c, + 0xe5, 0xa1, 0x92, 0xe2, 0x56, 0x65, 0x5c, 0xa5, 0xf9, 0xb8, 0xf2, 0x7c, 0x4e, 0x46, 0xd6, 0x55, + 0x19, 0x59, 0x8b, 0x62, 0x94, 0x88, 0xad, 0xa1, 0x20, 0xf3, 0xfd, 0x70, 0x90, 0xc1, 0xdf, 0x87, + 0x6a, 0xc8, 0x41, 0x34, 0xef, 0xb4, 0x3f, 0x79, 0xb9, 0x7d, 0xc0, 0x93, 0xd4, 0x33, 0x96, 0x97, + 0xf4, 0xba, 0x46, 0x73, 0xdd, 0x41, 0xfb, 0xf8, 0xb8, 0x9e, 0x41, 0x55, 0x28, 0x1d, 0x76, 0xba, + 0x27, 0x9c, 0x2b, 0x8b, 0x9f, 0xf9, 0x12, 0x44, 0x92, 0x53, 0x72, 0xdb, 0x9c, 0x92, 0xdb, 0x34, + 0x99, 0xdb, 0x32, 0x41, 0x6e, 0x63, 0x69, 0xee, 0xa0, 0xbd, 0x7d, 0xdc, 0xae, 0xcf, 0x3f, 0xad, + 0x41, 0x85, 0xfb, 0xf7, 0x64, 0x6c, 0xd1, 0x54, 0xfb, 0x0f, 0x1a, 0x40, 0x70, 0x9a, 0x50, 0x0b, + 0x0a, 0x3d, 0xae, 0xa7, 0xa1, 0xb1, 0x60, 0xb4, 0x92, 0xb8, 0x64, 0xba, 0xe4, 0x42, 0xdf, 0x82, + 0x82, 0x3b, 0xee, 0xf5, 0x88, 0x2b, 0x53, 0xde, 0xad, 0x68, 0x3c, 0x14, 0xd1, 0x4a, 0x97, 0x7c, + 0x74, 0xc8, 0x99, 0x61, 0x0e, 0xc6, 0x2c, 0x01, 0x4e, 0x1f, 0x22, 0xf8, 0xf0, 0xdf, 0x69, 0x50, + 0x56, 0x36, 0xef, 0x6f, 0x18, 0x84, 0xef, 0x42, 0x89, 0xd9, 0x40, 0xfa, 0x22, 0x0c, 0x17, 0xf5, + 0x80, 0x80, 0x7e, 0x07, 0x4a, 0xf2, 0x04, 0xc8, 0x48, 0xdc, 0x48, 0x16, 0xdb, 0x19, 0xe9, 0x01, + 0x2b, 0xde, 0x87, 0x45, 0xe6, 0x95, 0x1e, 0x2d, 0xae, 0xa5, 0x1f, 0xd5, 0xf2, 0x53, 0x8b, 0x94, + 0x9f, 0x4d, 0x28, 0x8e, 0x2e, 0xae, 0x5d, 0xb3, 0x67, 0x0c, 0x84, 0x15, 0x7e, 0x1b, 0x7f, 0x0c, + 0x48, 0x15, 0x36, 0xcb, 0x74, 0x71, 0x15, 0xca, 0xcf, 0x0d, 0xf7, 0x42, 0x98, 0x84, 0x1f, 0x43, + 0x95, 0x36, 0xf7, 0x5f, 0xbd, 0x81, 0x8d, 0xec, 0xe5, 0x40, 0x72, 0xcf, 0xe4, 0x73, 0x04, 0xf3, + 0x17, 0x86, 0x7b, 0xc1, 0x26, 0x5a, 0xd5, 0xd9, 0x33, 0x7a, 0x0f, 0xea, 0x3d, 0x3e, 0xc9, 0x93, + 0xc8, 0x2b, 0xc3, 0x82, 0xa0, 0xfb, 0x95, 0xe0, 0x67, 0x50, 0xe1, 0x73, 0xf8, 0x6d, 0x1b, 0x81, + 0x17, 0x61, 0xe1, 0xd8, 0x32, 0x46, 0xee, 0x85, 0x2d, 0xb3, 0x1b, 0x9d, 0x74, 0x3d, 0xa0, 0xcd, + 0xa4, 0xf1, 0x5d, 0x58, 0x70, 0xc8, 0xd0, 0x30, 0x2d, 0xd3, 0x3a, 0x3f, 0x39, 0xbd, 0xf6, 0x88, + 0x2b, 0x5e, 0x98, 0x6a, 0x3e, 0xf9, 0x29, 0xa5, 0x52, 0xd3, 0x4e, 0x07, 0xf6, 0xa9, 0x08, 0x73, + 0xec, 0x19, 0xff, 0x34, 0x03, 0x95, 0x4f, 0x0d, 0xaf, 0x27, 0x97, 0x0e, 0xed, 0x41, 0xcd, 0x0f, + 0x6e, 0x8c, 0x22, 0x6c, 0x89, 0xa4, 0x58, 0x36, 0x46, 0x96, 0xd2, 0x32, 0x3b, 0x56, 0x7b, 0x2a, + 0x81, 0x89, 0x32, 0xac, 0x1e, 0x19, 0xf8, 0xa2, 0x32, 0xe9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, + 0xa0, 
0x0e, 0xd4, 0x47, 0x8e, 0x7d, 0xee, 0x10, 0xd7, 0xf5, 0x85, 0xf1, 0x34, 0x86, 0x13, 0x84, + 0x1d, 0x09, 0xd6, 0x40, 0xdc, 0xc2, 0x28, 0x4c, 0x7a, 0xba, 0x10, 0xd4, 0x33, 0x3c, 0x38, 0xfd, + 0x57, 0x06, 0x50, 0x7c, 0x52, 0xbf, 0x6e, 0x89, 0xf7, 0x10, 0x6a, 0xae, 0x67, 0x38, 0xb1, 0xcd, + 0x56, 0x65, 0x54, 0x3f, 0xe2, 0xbf, 0x0b, 0xbe, 0x41, 0x27, 0x96, 0xed, 0x99, 0x67, 0xd7, 0xa2, + 0x4a, 0xae, 0x49, 0xf2, 0x21, 0xa3, 0xa2, 0x36, 0x14, 0xce, 0xcc, 0x81, 0x47, 0x1c, 0xb7, 0x91, + 0x5b, 0xcf, 0x6e, 0xd4, 0xb6, 0x1e, 0xdf, 0xb4, 0x0c, 0x9b, 0x1f, 0x31, 0xfe, 0xee, 0xf5, 0x88, + 0xe8, 0x72, 0xac, 0x5a, 0x79, 0xe6, 0x43, 0xd5, 0xf8, 0x6d, 0x28, 0xbe, 0xa6, 0x22, 0xe8, 0x5b, + 0x76, 0x81, 0x17, 0x8b, 0xac, 0xcd, 0x5f, 0xb2, 0xcf, 0x1c, 0xe3, 0x7c, 0x48, 0x2c, 0x4f, 0xbe, + 0x07, 0xca, 0x36, 0x7e, 0x08, 0x10, 0xa8, 0xa1, 0x21, 0xff, 0xb0, 0x73, 0xf4, 0xb2, 0x5b, 0x9f, + 0x43, 0x15, 0x28, 0x1e, 0x76, 0x76, 0xdb, 0x07, 0x6d, 0x9a, 0x1f, 0x70, 0x4b, 0xba, 0x34, 0xb4, + 0x96, 0xaa, 0x4e, 0x2d, 0xa4, 0x13, 0xaf, 0xc2, 0x72, 0xd2, 0x02, 0xd2, 0x5a, 0xb4, 0x2a, 0x76, + 0xe9, 0x4c, 0x47, 0x45, 0x55, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0xa2, 0x38, + 0x97, 0x4d, 0xea, 0x08, 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x0c, 0x2f, 0xb9, 0xc4, + 0xf0, 0x82, 0x1e, 0x40, 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x6a, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, + 0xd2, 0x42, 0x4e, 0x2f, 0x84, 0x9d, 0x8e, 0x1e, 0x42, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, + 0xcb, 0x18, 0x55, 0x59, 0xbb, 0xb7, 0x29, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xde, 0x91, + 0x9e, 0x39, 0x86, 0xa5, 0xbe, 0xcc, 0x75, 0xbb, 0x07, 0xc2, 0xdd, 0xf4, 0x11, 0xd5, 0x20, 0xb3, + 0xb7, 0x2b, 0x9c, 0x90, 0xd9, 0xdb, 0xc5, 0x3f, 0xd6, 0x00, 0xa9, 0xe3, 0x66, 0xf2, 0x73, 0x44, + 0xb8, 0x54, 0x9f, 0x0d, 0xd4, 0x2f, 0x43, 0x8e, 0x38, 0x8e, 0xed, 0x30, 0x8f, 0x96, 0x74, 0xde, + 0xc0, 0x6f, 0x0b, 0x1b, 0x74, 0x32, 0xb1, 0x2f, 0xfd, 0x33, 0xc8, 0xa5, 0x69, 0xbe, 0xa9, 0xfb, + 0xb0, 0x14, 0xe2, 0x9a, 0x29, 0x73, 0x7d, 0x04, 0x0b, 0x4c, 0xd8, 0xce, 0x05, 0xe9, 0x5d, 0x8e, + 0x6c, 0xd3, 0x8a, 0xe9, 0xa3, 0x2b, 0x17, 0x04, 0x58, 0x3a, 0x0f, 0x3e, 0xb1, 0x8a, 0x4f, 0xec, + 0x76, 0x0f, 0xf0, 0xe7, 0xb0, 0x1a, 0x91, 0x23, 0xcd, 0xff, 0x43, 0x28, 0xf7, 0x7c, 0xa2, 0x2b, + 0x6a, 0x9d, 0x7b, 0x61, 0xe3, 0xa2, 0x43, 0xd5, 0x11, 0xb8, 0x03, 0xb7, 0x62, 0xa2, 0x67, 0x9a, + 0xf3, 0xbb, 0xb0, 0xc2, 0x04, 0xee, 0x13, 0x32, 0xda, 0x1e, 0x98, 0x93, 0x54, 0x4f, 0x8f, 0xc4, + 0xa4, 0x14, 0xc6, 0xaf, 0x77, 0x5f, 0xe0, 0xdf, 0x17, 0x1a, 0xbb, 0xe6, 0x90, 0x74, 0xed, 0x83, + 0x74, 0xdb, 0x68, 0x36, 0xbb, 0x24, 0xd7, 0xae, 0x28, 0x6b, 0xd8, 0x33, 0xfe, 0x47, 0x4d, 0xb8, + 0x4a, 0x1d, 0xfe, 0x35, 0xef, 0xe4, 0x35, 0x80, 0x73, 0x7a, 0x64, 0x48, 0x9f, 0x76, 0x70, 0x44, + 0x45, 0xa1, 0xf8, 0x76, 0xd2, 0xf8, 0x5d, 0x11, 0x76, 0x2e, 0x8b, 0x7d, 0xce, 0xfe, 0xf8, 0x51, + 0xee, 0x1e, 0x94, 0x19, 0xe1, 0xd8, 0x33, 0xbc, 0xb1, 0x1b, 0x5b, 0x8c, 0x3f, 0x17, 0xdb, 0x5e, + 0x0e, 0x9a, 0x69, 0x5e, 0xdf, 0x82, 0x3c, 0x7b, 0x99, 0x90, 0xa5, 0xf4, 0xed, 0x84, 0xfd, 0xc8, + 0xed, 0xd0, 0x05, 0x23, 0xfe, 0xa9, 0x06, 0xf9, 0x17, 0x0c, 0x82, 0x55, 0x4c, 0x9b, 0x97, 0x6b, + 0x61, 0x19, 0x43, 0x0e, 0x0c, 0x95, 0x74, 0xf6, 0xcc, 0x4a, 0x4f, 0x42, 0x9c, 0x97, 0xfa, 0x01, + 0x2f, 0x71, 0x4b, 0xba, 0xdf, 0xa6, 0x3e, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, + 0x2a, 0x14, 0x5a, 0x3d, 0x9b, 0xee, 0x01, 0x31, 0x1c, 0x4b, 0x80, 0xa6, 0x45, 0x3d, 0x20, 0xe0, + 0x03, 0xa8, 0x73, 0x3b, 0xb6, 0xfb, 0x7d, 0xa5, 0xc0, 0xf4, 0xb5, 0x69, 0x11, 0x6d, 0x21, 0x69, + 0x99, 0xa8, 0xb4, 0x7f, 0xd2, 
0x60, 0x51, 0x11, 0x37, 0x93, 0x57, 0xdf, 0x87, 0x3c, 0x07, 0xa9, + 0x45, 0xa5, 0xb3, 0x1c, 0x1e, 0xc5, 0xd5, 0xe8, 0x82, 0x07, 0x6d, 0x42, 0x81, 0x3f, 0xc9, 0x77, + 0x80, 0x64, 0x76, 0xc9, 0x84, 0x1f, 0xc2, 0x92, 0x20, 0x91, 0xa1, 0x9d, 0x74, 0x30, 0xd8, 0x62, + 0xe0, 0x3f, 0x85, 0xe5, 0x30, 0xdb, 0x4c, 0x53, 0x52, 0x8c, 0xcc, 0xbc, 0x89, 0x91, 0xdb, 0xd2, + 0xc8, 0x97, 0xa3, 0xbe, 0x52, 0x47, 0x45, 0x77, 0x8c, 0xba, 0x5e, 0x99, 0xf0, 0x7a, 0x05, 0x13, + 0x90, 0x22, 0xbe, 0xd1, 0x09, 0x2c, 0xc9, 0xed, 0x70, 0x60, 0xba, 0x7e, 0xb9, 0xfe, 0x25, 0x20, + 0x95, 0xf8, 0x8d, 0x1a, 0xf4, 0x8e, 0x74, 0xc7, 0x91, 0x63, 0x0f, 0xed, 0x54, 0x97, 0xe2, 0x3f, + 0x83, 0x95, 0x08, 0xdf, 0x37, 0xed, 0xb7, 0x5d, 0x22, 0x8b, 0x15, 0xe9, 0xb7, 0x8f, 0x01, 0xa9, + 0xc4, 0x99, 0xb2, 0x56, 0x0b, 0x16, 0x5f, 0xd8, 0x13, 0x1a, 0xfe, 0x28, 0x35, 0x38, 0xf7, 0x1c, + 0x63, 0xf0, 0x5d, 0xe1, 0xb7, 0xa9, 0x72, 0x75, 0xc0, 0x4c, 0xca, 0xff, 0x43, 0x83, 0xca, 0xf6, + 0xc0, 0x70, 0x86, 0x52, 0xf1, 0xf7, 0x20, 0xcf, 0xdf, 0x9c, 0x05, 0x58, 0xf5, 0x4e, 0x58, 0x8c, + 0xca, 0xcb, 0x1b, 0xdb, 0xfc, 0x3d, 0x5b, 0x8c, 0xa2, 0x86, 0x8b, 0xef, 0x59, 0xbb, 0x91, 0xef, + 0x5b, 0xbb, 0xe8, 0x03, 0xc8, 0x19, 0x74, 0x08, 0x4b, 0x33, 0xb5, 0x28, 0x66, 0xc1, 0xa4, 0xb1, + 0xfa, 0x9e, 0x73, 0xe1, 0xef, 0x40, 0x59, 0xd1, 0x80, 0x0a, 0x90, 0x7d, 0xd6, 0x16, 0xc5, 0xf8, + 0xf6, 0x4e, 0x77, 0xef, 0x15, 0x07, 0x6b, 0x6a, 0x00, 0xbb, 0x6d, 0xbf, 0x9d, 0xc1, 0x9f, 0x89, + 0x51, 0x22, 0xa4, 0xab, 0xf6, 0x68, 0x69, 0xf6, 0x64, 0xde, 0xc8, 0x9e, 0x2b, 0xa8, 0x8a, 0xe9, + 0xcf, 0x9a, 0xa2, 0x98, 0xbc, 0x94, 0x14, 0xa5, 0x18, 0xaf, 0x0b, 0x46, 0xbc, 0x00, 0x55, 0x91, + 0xb4, 0xc4, 0xfe, 0xfb, 0xf7, 0x0c, 0xd4, 0x24, 0x65, 0x56, 0x50, 0x5d, 0xe2, 0x81, 0x3c, 0xc9, + 0xf9, 0x68, 0xe0, 0x2a, 0xe4, 0xfb, 0xa7, 0xc7, 0xe6, 0x97, 0xf2, 0x03, 0x88, 0x68, 0x51, 0xfa, + 0x80, 0xeb, 0xe1, 0x5f, 0x21, 0x45, 0x8b, 0x66, 0x23, 0xc7, 0x38, 0xf3, 0xf6, 0xac, 0x3e, 0xb9, + 0x62, 0xb9, 0x6d, 0x5e, 0x0f, 0x08, 0x0c, 0x28, 0x11, 0x5f, 0x2b, 0xd9, 0x0b, 0x82, 0xf2, 0xf5, + 0x12, 0x3d, 0x82, 0x3a, 0x7d, 0xde, 0x1e, 0x8d, 0x06, 0x26, 0xe9, 0x73, 0x01, 0x05, 0xc6, 0x13, + 0xa3, 0x53, 0xed, 0xac, 0xa4, 0x76, 0x1b, 0x45, 0x16, 0x5d, 0x45, 0x0b, 0xad, 0x43, 0x99, 0xdb, + 0xb7, 0x67, 0xbd, 0x74, 0x09, 0xfb, 0x84, 0x97, 0xd5, 0x55, 0x52, 0x38, 0x5b, 0x42, 0x34, 0x5b, + 0x2e, 0xc1, 0xe2, 0xf6, 0xd8, 0xbb, 0x68, 0x5b, 0xc6, 0xe9, 0x40, 0x46, 0x22, 0x5a, 0xce, 0x50, + 0xe2, 0xae, 0xe9, 0xaa, 0xd4, 0x36, 0x2c, 0x51, 0x2a, 0xb1, 0x3c, 0xb3, 0xa7, 0x64, 0x02, 0x59, + 0x2b, 0x68, 0x91, 0x5a, 0xc1, 0x70, 0xdd, 0xd7, 0xb6, 0xd3, 0x17, 0xee, 0xf5, 0xdb, 0x78, 0xc2, + 0x85, 0xbf, 0x74, 0x43, 0xf9, 0xfe, 0xd7, 0x94, 0x82, 0x3e, 0x84, 0x82, 0x3d, 0x62, 0x9f, 0xa4, + 0x05, 0x6e, 0xb0, 0xba, 0xc9, 0x3f, 0x62, 0x6f, 0x0a, 0xc1, 0x1d, 0xde, 0xab, 0x4b, 0x36, 0xbc, + 0x11, 0xe8, 0x7d, 0x46, 0xbc, 0x29, 0x7a, 0xf1, 0x63, 0x58, 0x91, 0x9c, 0x02, 0x26, 0x9f, 0xc2, + 0xdc, 0x81, 0x7b, 0x92, 0x79, 0xe7, 0xc2, 0xb0, 0xce, 0xc9, 0x91, 0x30, 0xf1, 0x37, 0xf5, 0xcf, + 0x53, 0x68, 0xf8, 0x76, 0xb2, 0x57, 0x37, 0x7b, 0xa0, 0x1a, 0x30, 0x76, 0xc5, 0x4e, 0x2f, 0xe9, + 0xec, 0x99, 0xd2, 0x1c, 0x7b, 0xe0, 0xd7, 0x6a, 0xf4, 0x19, 0xef, 0xc0, 0x6d, 0x29, 0x43, 0xbc, + 0x54, 0x85, 0x85, 0xc4, 0x0c, 0x4a, 0x12, 0x22, 0x1c, 0x46, 0x87, 0x4e, 0x5f, 0x28, 0x95, 0x33, + 0xec, 0x5a, 0x26, 0x53, 0x53, 0x64, 0xae, 0xf0, 0x3d, 0x44, 0x0d, 0x53, 0xd3, 0xb1, 0x20, 0x53, + 0x01, 0x2a, 0x59, 0x2c, 0x04, 0x25, 0xc7, 0x16, 0x22, 0x26, 0xfa, 0x07, 0xb0, 0xe6, 0x1b, 0x41, + 0xfd, 0x76, 0x44, 0x9c, 0xa1, 0xe9, 0xba, 0x0a, 0xb0, 
0x9a, 0x34, 0xf1, 0x77, 0x60, 0x7e, 0x44, + 0x44, 0x24, 0x2c, 0x6f, 0x21, 0xb9, 0x89, 0x94, 0xc1, 0xac, 0x1f, 0xf7, 0xe1, 0xbe, 0x94, 0xce, + 0x3d, 0x9a, 0x28, 0x3e, 0x6a, 0x94, 0x84, 0x9b, 0x32, 0x29, 0x70, 0x53, 0x36, 0x02, 0xf6, 0x7f, + 0xcc, 0x1d, 0x29, 0x4f, 0xe3, 0x4c, 0x19, 0x6e, 0x9f, 0xfb, 0xd4, 0x3f, 0xc4, 0x33, 0x09, 0x3b, + 0x85, 0xe5, 0xf0, 0xd9, 0x9f, 0x29, 0xf8, 0x2e, 0x43, 0xce, 0xb3, 0x2f, 0x89, 0x0c, 0xbd, 0xbc, + 0x21, 0x0d, 0xf6, 0x03, 0xc3, 0x4c, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x67, 0xb5, 0x97, 0xae, + 0xa6, 0xac, 0x6c, 0x79, 0x03, 0x1f, 0xc2, 0x6a, 0x34, 0x4c, 0xcc, 0x64, 0xf2, 0x2b, 0xbe, 0x81, + 0x93, 0x22, 0xc9, 0x4c, 0x72, 0x3f, 0x09, 0x82, 0x81, 0x12, 0x50, 0x66, 0x12, 0xa9, 0x43, 0x33, + 0x29, 0xbe, 0xfc, 0x36, 0xf6, 0xab, 0x1f, 0x6e, 0x66, 0x12, 0xe6, 0x06, 0xc2, 0x66, 0x5f, 0xfe, + 0x20, 0x46, 0x64, 0xa7, 0xc6, 0x08, 0x71, 0x48, 0x82, 0x28, 0xf6, 0x35, 0x6c, 0x3a, 0xa1, 0x23, + 0x08, 0xa0, 0xb3, 0xea, 0xa0, 0x39, 0xc4, 0xd7, 0xc1, 0x1a, 0x72, 0x63, 0xab, 0x61, 0x77, 0xa6, + 0xc5, 0xf8, 0x34, 0x88, 0x9d, 0xb1, 0xc8, 0x3c, 0x93, 0xe0, 0xcf, 0x60, 0x3d, 0x3d, 0x28, 0xcf, + 0x22, 0xf9, 0x51, 0x0b, 0x4a, 0x7e, 0x19, 0xac, 0xdc, 0x22, 0x2a, 0x43, 0xe1, 0xb0, 0x73, 0x7c, + 0xb4, 0xbd, 0xd3, 0xe6, 0xd7, 0x88, 0x76, 0x3a, 0xba, 0xfe, 0xf2, 0xa8, 0x5b, 0xcf, 0x6c, 0xfd, + 0x32, 0x0b, 0x99, 0xfd, 0x57, 0xe8, 0x73, 0xc8, 0xf1, 0x6f, 0xea, 0x53, 0x2e, 0x52, 0x34, 0xa7, + 0x5d, 0x1b, 0xc0, 0xb7, 0x7e, 0xfc, 0xdf, 0xbf, 0xfc, 0x79, 0x66, 0x11, 0x57, 0x5a, 0x93, 0x6f, + 0xb7, 0x2e, 0x27, 0x2d, 0x96, 0x1b, 0x9e, 0x68, 0x8f, 0xd0, 0x27, 0x90, 0x3d, 0x1a, 0x7b, 0x28, + 0xf5, 0x82, 0x45, 0x33, 0xfd, 0x26, 0x01, 0x5e, 0x61, 0x42, 0x17, 0x30, 0x08, 0xa1, 0xa3, 0xb1, + 0x47, 0x45, 0xfe, 0x10, 0xca, 0xea, 0x3d, 0x80, 0x1b, 0x6f, 0x5d, 0x34, 0x6f, 0xbe, 0x63, 0x80, + 0xef, 0x31, 0x55, 0xb7, 0x30, 0x12, 0xaa, 0xf8, 0x4d, 0x05, 0x75, 0x16, 0xdd, 0x2b, 0x0b, 0xa5, + 0xde, 0xc9, 0x68, 0xa6, 0x5f, 0x3b, 0x88, 0xcd, 0xc2, 0xbb, 0xb2, 0xa8, 0xc8, 0x3f, 0x16, 0x37, + 0x0e, 0x7a, 0x1e, 0xba, 0x9f, 0xf0, 0xc5, 0x59, 0xfd, 0xb6, 0xda, 0x5c, 0x4f, 0x67, 0x10, 0x4a, + 0xee, 0x32, 0x25, 0xab, 0x78, 0x51, 0x28, 0xe9, 0xf9, 0x2c, 0x4f, 0xb4, 0x47, 0x5b, 0x3d, 0xc8, + 0xb1, 0xef, 0x16, 0xe8, 0x0b, 0xf9, 0xd0, 0x4c, 0xf8, 0x80, 0x93, 0xb2, 0xd0, 0xa1, 0x2f, 0x1e, + 0x78, 0x99, 0x29, 0xaa, 0xe1, 0x12, 0x55, 0xc4, 0xbe, 0x5a, 0x3c, 0xd1, 0x1e, 0x6d, 0x68, 0x1f, + 0x6a, 0x5b, 0xff, 0x9c, 0x83, 0x1c, 0x03, 0xec, 0xd0, 0x25, 0x40, 0x80, 0xe1, 0x47, 0x67, 0x17, + 0xfb, 0x2a, 0x10, 0x9d, 0x5d, 0x1c, 0xfe, 0xc7, 0x4d, 0xa6, 0x74, 0x19, 0x2f, 0x50, 0xa5, 0x0c, + 0x07, 0x6c, 0x31, 0x68, 0x93, 0xfa, 0xf1, 0xaf, 0x34, 0x81, 0x57, 0xf2, 0xb3, 0x84, 0x92, 0xa4, + 0x85, 0x80, 0xfc, 0xe8, 0x76, 0x48, 0x00, 0xf1, 0xf1, 0x77, 0x99, 0xc2, 0x16, 0xae, 0x07, 0x0a, + 0x1d, 0xc6, 0xf1, 0x44, 0x7b, 0xf4, 0x45, 0x03, 0x2f, 0x09, 0x2f, 0x47, 0x7a, 0xd0, 0x8f, 0xa0, + 0x16, 0x06, 0xaa, 0xd1, 0x83, 0x04, 0x5d, 0x51, 0xbc, 0xbb, 0xf9, 0xf6, 0x74, 0x26, 0x61, 0xd3, + 0x1a, 0xb3, 0x49, 0x28, 0xe7, 0x9a, 0x2f, 0x09, 0x19, 0x19, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x7b, + 0x4d, 0x7c, 0x47, 0x08, 0x90, 0x67, 0x94, 0x24, 0x3d, 0x86, 0x6b, 0x37, 0x1f, 0xde, 0xc0, 0x25, + 0x8c, 0xf8, 0x03, 0x66, 0xc4, 0xef, 0xe2, 0xe5, 0xc0, 0x08, 0xcf, 0x1c, 0x12, 0xcf, 0x16, 0x56, + 0x7c, 0x71, 0x17, 0xdf, 0x0a, 0x39, 0x27, 0xd4, 0x1b, 0x2c, 0x16, 0x47, 0x8f, 0x13, 0x17, 0x2b, + 0x84, 0x46, 0x27, 0x2e, 0x56, 0x18, 0x7a, 0x4e, 0x5a, 0x2c, 0x8e, 0x15, 0x27, 0x2d, 0x96, 0xdf, + 0xb3, 0xf5, 0xff, 0xf3, 0x50, 0xd8, 0xe1, 0x37, 0x7d, 0x91, 0x0d, 0x25, 0x1f, 
0x7c, 0x45, 0x6b, + 0x49, 0x08, 0x53, 0xf0, 0x2e, 0xd1, 0xbc, 0x9f, 0xda, 0x2f, 0x0c, 0x7a, 0x8b, 0x19, 0x74, 0x07, + 0xaf, 0x52, 0xcd, 0xe2, 0x32, 0x71, 0x8b, 0xc3, 0x18, 0x2d, 0xa3, 0xdf, 0xa7, 0x8e, 0xf8, 0x13, + 0xa8, 0xa8, 0xe8, 0x28, 0x7a, 0x2b, 0x11, 0xd5, 0x52, 0x01, 0xd6, 0x26, 0x9e, 0xc6, 0x22, 0x34, + 0xbf, 0xcd, 0x34, 0xaf, 0xe1, 0xdb, 0x09, 0x9a, 0x1d, 0xc6, 0x1a, 0x52, 0xce, 0x91, 0xcd, 0x64, + 0xe5, 0x21, 0xe0, 0x34, 0x59, 0x79, 0x18, 0x18, 0x9d, 0xaa, 0x7c, 0xcc, 0x58, 0xa9, 0x72, 0x17, + 0x20, 0xc0, 0x30, 0x51, 0xa2, 0x2f, 0x95, 0x97, 0xa9, 0x68, 0x70, 0x88, 0xc3, 0x9f, 0x18, 0x33, + 0xb5, 0x62, 0xdf, 0x45, 0xd4, 0x0e, 0x4c, 0xd7, 0xe3, 0x07, 0xb3, 0x1a, 0x02, 0x25, 0x51, 0xe2, + 0x7c, 0xc2, 0xc8, 0x66, 0xf3, 0xc1, 0x54, 0x1e, 0xa1, 0xfd, 0x21, 0xd3, 0x7e, 0x1f, 0x37, 0x13, + 0xb4, 0x8f, 0x38, 0x2f, 0xdd, 0x6c, 0x7f, 0x9d, 0x87, 0xf2, 0x0b, 0xc3, 0xb4, 0x3c, 0x62, 0x19, + 0x56, 0x8f, 0xa0, 0x53, 0xc8, 0xb1, 0x4c, 0x1d, 0x0d, 0xc4, 0x2a, 0x60, 0x17, 0x0d, 0xc4, 0x21, + 0x34, 0x0b, 0xaf, 0x33, 0xc5, 0x4d, 0xbc, 0x42, 0x15, 0x0f, 0x03, 0xd1, 0x2d, 0x06, 0x42, 0xd1, + 0x49, 0x9f, 0x41, 0x5e, 0x7c, 0xc3, 0x89, 0x08, 0x0a, 0x81, 0x53, 0xcd, 0xbb, 0xc9, 0x9d, 0x49, + 0x7b, 0x59, 0x55, 0xe3, 0x32, 0x3e, 0xaa, 0x67, 0x02, 0x10, 0xa0, 0xab, 0xd1, 0x15, 0x8d, 0x81, + 0xb1, 0xcd, 0xf5, 0x74, 0x86, 0x24, 0x9f, 0xaa, 0x3a, 0xfb, 0x3e, 0x2f, 0xd5, 0xfb, 0x47, 0x30, + 0xff, 0xdc, 0x70, 0x2f, 0x50, 0x24, 0xf7, 0x2a, 0x37, 0x80, 0x9a, 0xcd, 0xa4, 0x2e, 0xa1, 0xe5, + 0x3e, 0xd3, 0x72, 0x9b, 0x87, 0x32, 0x55, 0xcb, 0x85, 0xe1, 0xd2, 0xa4, 0x86, 0xfa, 0x90, 0xe7, + 0x17, 0x82, 0xa2, 0xfe, 0x0b, 0x5d, 0x2a, 0x8a, 0xfa, 0x2f, 0x7c, 0x87, 0xe8, 0x66, 0x2d, 0x23, + 0x28, 0xca, 0x1b, 0x38, 0x28, 0xf2, 0x39, 0x36, 0x72, 0x5b, 0xa7, 0xb9, 0x96, 0xd6, 0x2d, 0x74, + 0x3d, 0x60, 0xba, 0xee, 0xe1, 0x46, 0x6c, 0xad, 0x04, 0xe7, 0x13, 0xed, 0xd1, 0x87, 0x1a, 0xfa, + 0x11, 0x40, 0x00, 0x48, 0xc7, 0x4e, 0x60, 0x14, 0xdb, 0x8e, 0x9d, 0xc0, 0x18, 0x96, 0x8d, 0x37, + 0x99, 0xde, 0x0d, 0xfc, 0x20, 0xaa, 0xd7, 0x73, 0x0c, 0xcb, 0x3d, 0x23, 0xce, 0x07, 0x1c, 0x74, + 0x74, 0x2f, 0xcc, 0x11, 0x3d, 0x0c, 0xff, 0xba, 0x00, 0xf3, 0xb4, 0x02, 0xa6, 0x85, 0x42, 0x00, + 0x1c, 0x44, 0x2d, 0x89, 0x01, 0x7c, 0x51, 0x4b, 0xe2, 0x98, 0x43, 0xb8, 0x50, 0x60, 0xbf, 0x11, + 0x21, 0x8c, 0x81, 0x3a, 0xda, 0x86, 0xb2, 0x82, 0x2c, 0xa0, 0x04, 0x61, 0x61, 0xe4, 0x30, 0x9a, + 0x7a, 0x12, 0x60, 0x09, 0x7c, 0x87, 0xe9, 0x5b, 0xe1, 0xa9, 0x87, 0xe9, 0xeb, 0x73, 0x0e, 0xaa, + 0xf0, 0x35, 0x54, 0x54, 0xf4, 0x01, 0x25, 0xc8, 0x8b, 0xa0, 0x92, 0xd1, 0x30, 0x9b, 0x04, 0x5e, + 0x84, 0x0f, 0xbe, 0xff, 0x3b, 0x18, 0xc9, 0x46, 0x15, 0x0f, 0xa0, 0x20, 0xe0, 0x88, 0xa4, 0x59, + 0x86, 0x21, 0xcc, 0xa4, 0x59, 0x46, 0xb0, 0x8c, 0x70, 0x71, 0xc9, 0x34, 0xd2, 0x37, 0x2e, 0x99, + 0xca, 0x84, 0xb6, 0x67, 0xc4, 0x4b, 0xd3, 0x16, 0xa0, 0x6b, 0x69, 0xda, 0x94, 0xb7, 0xdd, 0x34, + 0x6d, 0xe7, 0xc4, 0x13, 0xc7, 0x45, 0xbe, 0x45, 0xa2, 0x14, 0x61, 0x6a, 0xfa, 0xc0, 0xd3, 0x58, + 0x92, 0x6a, 0xff, 0x40, 0xa1, 0xcc, 0x1d, 0x57, 0x00, 0x01, 0x58, 0x12, 0x2d, 0xe8, 0x12, 0x11, + 0xd7, 0x68, 0x41, 0x97, 0x8c, 0xb7, 0x84, 0x43, 0x43, 0xa0, 0x97, 0xbf, 0x7a, 0x50, 0xcd, 0x3f, + 0xd3, 0x00, 0xc5, 0x71, 0x15, 0xf4, 0x38, 0x59, 0x7a, 0x22, 0x8e, 0xdb, 0x7c, 0xff, 0xcd, 0x98, + 0x93, 0xa2, 0x7d, 0x60, 0x52, 0x8f, 0x71, 0x8f, 0x5e, 0x53, 0xa3, 0xfe, 0x42, 0x83, 0x6a, 0x08, + 0x94, 0x41, 0xef, 0xa4, 0xac, 0x69, 0x04, 0x06, 0x6e, 0xbe, 0x7b, 0x23, 0x5f, 0x52, 0xa5, 0xab, + 0xec, 0x00, 0x59, 0xf2, 0xff, 0x44, 0x83, 0x5a, 0x18, 0xc4, 0x41, 0x29, 0xb2, 0x63, 0x30, 0x72, + 0x73, 
0xe3, 0x66, 0xc6, 0xe9, 0xcb, 0x13, 0x54, 0xfb, 0x03, 0x28, 0x08, 0xd8, 0x27, 0x69, 0xe3, + 0x87, 0x01, 0xe8, 0xa4, 0x8d, 0x1f, 0xc1, 0x8c, 0x12, 0x36, 0xbe, 0x63, 0x0f, 0x88, 0x72, 0xcc, + 0x04, 0x2e, 0x94, 0xa6, 0x6d, 0xfa, 0x31, 0x8b, 0x80, 0x4a, 0x69, 0xda, 0x82, 0x63, 0x26, 0x01, + 0x21, 0x94, 0x22, 0xec, 0x86, 0x63, 0x16, 0xc5, 0x93, 0x12, 0x8e, 0x19, 0x53, 0xa8, 0x1c, 0xb3, + 0x00, 0xba, 0x49, 0x3a, 0x66, 0x31, 0x3c, 0x3d, 0xe9, 0x98, 0xc5, 0xd1, 0x9f, 0x84, 0x75, 0x64, + 0x7a, 0x43, 0xc7, 0x6c, 0x29, 0x01, 0xe5, 0x41, 0xef, 0xa7, 0x38, 0x31, 0x11, 0xa6, 0x6f, 0x7e, + 0xf0, 0x86, 0xdc, 0xa9, 0x7b, 0x9c, 0xbb, 0x5f, 0xee, 0xf1, 0xbf, 0xd1, 0x60, 0x39, 0x09, 0x21, + 0x42, 0x29, 0x7a, 0x52, 0xe0, 0xfd, 0xe6, 0xe6, 0x9b, 0xb2, 0x4f, 0xf7, 0x96, 0xbf, 0xeb, 0x9f, + 0xd6, 0xff, 0xed, 0xab, 0x35, 0xed, 0x3f, 0xbf, 0x5a, 0xd3, 0xfe, 0xe7, 0xab, 0x35, 0xed, 0x6f, + 0xff, 0x77, 0x6d, 0xee, 0x34, 0xcf, 0x7e, 0x5d, 0xf9, 0xed, 0x5f, 0x05, 0x00, 0x00, 0xff, 0xff, + 0x52, 0x4e, 0xd7, 0x33, 0xe4, 0x39, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto similarity index 88% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto rename to cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto index d9da43c0973..423eabada4e 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto @@ -15,7 +15,7 @@ service KV { // Range gets the keys in the range from the key-value store. rpc Range(RangeRequest) returns (RangeResponse) { option (google.api.http) = { - post: "/v3beta/kv/range" + post: "/v3/kv/range" body: "*" }; } @@ -25,7 +25,7 @@ service KV { // and generates one event in the event history. rpc Put(PutRequest) returns (PutResponse) { option (google.api.http) = { - post: "/v3beta/kv/put" + post: "/v3/kv/put" body: "*" }; } @@ -35,7 +35,7 @@ service KV { // and generates a delete event in the event history for every deleted key. rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { option (google.api.http) = { - post: "/v3beta/kv/deleterange" + post: "/v3/kv/deleterange" body: "*" }; } @@ -46,7 +46,7 @@ service KV { // It is not allowed to modify the same key several times within one txn. rpc Txn(TxnRequest) returns (TxnResponse) { option (google.api.http) = { - post: "/v3beta/kv/txn" + post: "/v3/kv/txn" body: "*" }; } @@ -56,7 +56,7 @@ service KV { // indefinitely. rpc Compact(CompactionRequest) returns (CompactionResponse) { option (google.api.http) = { - post: "/v3beta/kv/compaction" + post: "/v3/kv/compaction" body: "*" }; } @@ -70,7 +70,7 @@ service Watch { // last compaction revision. rpc Watch(stream WatchRequest) returns (stream WatchResponse) { option (google.api.http) = { - post: "/v3beta/watch" + post: "/v3/watch" body: "*" }; } @@ -82,7 +82,7 @@ service Lease { // deleted if the lease expires. Each expired key generates a delete event in the event history. rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { option (google.api.http) = { - post: "/v3beta/lease/grant" + post: "/v3/lease/grant" body: "*" }; } @@ -90,8 +90,12 @@ service Lease { // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/revoke" + post: "/v3/lease/revoke" body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } }; } @@ -99,7 +103,7 @@ service Lease { // to the server and streaming keep alive responses from the server to the client. rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { option (google.api.http) = { - post: "/v3beta/lease/keepalive" + post: "/v3/lease/keepalive" body: "*" }; } @@ -107,16 +111,24 @@ service Lease { // LeaseTimeToLive retrieves lease information. rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/timetolive" + post: "/v3/lease/timetolive" body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } }; } // LeaseLeases lists all existing leases. rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/leases" + post: "/v3/lease/leases" body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } }; } } @@ -125,7 +137,7 @@ service Cluster { // MemberAdd adds a member into the cluster. rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/add" + post: "/v3/cluster/member/add" body: "*" }; } @@ -133,7 +145,7 @@ service Cluster { // MemberRemove removes an existing member from the cluster. rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/remove" + post: "/v3/cluster/member/remove" body: "*" }; } @@ -141,7 +153,7 @@ service Cluster { // MemberUpdate updates the member configuration. rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/update" + post: "/v3/cluster/member/update" body: "*" }; } @@ -149,7 +161,15 @@ service Cluster { // MemberList lists all the members in the cluster. rpc MemberList(MemberListRequest) returns (MemberListResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/list" + post: "/v3/cluster/member/list" + body: "*" + }; + } + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/promote" body: "*" }; } @@ -159,7 +179,7 @@ service Maintenance { // Alarm activates, deactivates, and queries alarms regarding cluster health. rpc Alarm(AlarmRequest) returns (AlarmResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/alarm" + post: "/v3/maintenance/alarm" body: "*" }; } @@ -167,7 +187,7 @@ service Maintenance { // Status gets the status of the member. rpc Status(StatusRequest) returns (StatusResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/status" + post: "/v3/maintenance/status" body: "*" }; } @@ -175,25 +195,29 @@ service Maintenance { // Defragment defragments a member's backend database to recover storage space. rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/defragment" + post: "/v3/maintenance/defragment" body: "*" }; } - // Hash computes the hash of the KV's backend. 
- // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. rpc Hash(HashRequest) returns (HashResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/hash" + post: "/v3/maintenance/hash" body: "*" }; } // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. rpc HashKV(HashKVRequest) returns (HashKVResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/hash" + post: "/v3/maintenance/hash" body: "*" }; } @@ -201,7 +225,7 @@ service Maintenance { // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/snapshot" + post: "/v3/maintenance/snapshot" body: "*" }; } @@ -209,7 +233,7 @@ service Maintenance { // MoveLeader requests current leader node to transfer its leadership to transferee. rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/transfer-leadership" + post: "/v3/maintenance/transfer-leadership" body: "*" }; } @@ -219,7 +243,7 @@ service Auth { // AuthEnable enables authentication. rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { option (google.api.http) = { - post: "/v3beta/auth/enable" + post: "/v3/auth/enable" body: "*" }; } @@ -227,7 +251,7 @@ service Auth { // AuthDisable disables authentication. rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { option (google.api.http) = { - post: "/v3beta/auth/disable" + post: "/v3/auth/disable" body: "*" }; } @@ -235,15 +259,15 @@ service Auth { // Authenticate processes an authenticate request. rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { option (google.api.http) = { - post: "/v3beta/auth/authenticate" + post: "/v3/auth/authenticate" body: "*" }; } - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/add" + post: "/v3/auth/user/add" body: "*" }; } @@ -251,7 +275,7 @@ service Auth { // UserGet gets detailed user information. rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/get" + post: "/v3/auth/user/get" body: "*" }; } @@ -259,7 +283,7 @@ service Auth { // UserList gets a list of all users. rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/list" + post: "/v3/auth/user/list" body: "*" }; } @@ -267,7 +291,7 @@ service Auth { // UserDelete deletes a specified user. rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/delete" + post: "/v3/auth/user/delete" body: "*" }; } @@ -275,7 +299,7 @@ service Auth { // UserChangePassword changes the password of a specified user. 
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/changepw" + post: "/v3/auth/user/changepw" body: "*" }; } @@ -283,7 +307,7 @@ service Auth { // UserGrant grants a role to a specified user. rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/grant" + post: "/v3/auth/user/grant" body: "*" }; } @@ -291,15 +315,15 @@ service Auth { // UserRevokeRole revokes a role of specified user. rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/revoke" + post: "/v3/auth/user/revoke" body: "*" }; } - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/add" + post: "/v3/auth/role/add" body: "*" }; } @@ -307,7 +331,7 @@ service Auth { // RoleGet gets detailed role information. rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/get" + post: "/v3/auth/role/get" body: "*" }; } @@ -315,7 +339,7 @@ service Auth { // RoleList gets lists of all roles. rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/list" + post: "/v3/auth/role/list" body: "*" }; } @@ -323,7 +347,7 @@ service Auth { // RoleDelete deletes a specified role. rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/delete" + post: "/v3/auth/role/delete" body: "*" }; } @@ -331,7 +355,7 @@ service Auth { // RoleGrantPermission grants a permission of a specified key or range to a specified role. rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/grant" + post: "/v3/auth/role/grant" body: "*" }; } @@ -339,7 +363,7 @@ service Auth { // RoleRevokePermission revokes a key or range permission of a specified role. rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/revoke" + post: "/v3/auth/role/revoke" body: "*" }; } @@ -418,7 +442,7 @@ message RangeRequest { int64 max_mod_revision = 11; // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. + // lesser create revisions will be filtered away. int64 min_create_revision = 12; // max_create_revision is the upper bound for returned key create revisions; all keys with @@ -519,7 +543,7 @@ message Compare { VERSION = 0; CREATE = 1; MOD = 2; - VALUE= 3; + VALUE = 3; LEASE = 4; } // result is logical comparison operation for this comparison. @@ -649,14 +673,17 @@ message WatchRequest { message WatchCreateRequest { // key is the key to register for watching. bytes key = 1; + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, // only the key argument is watched. If range_end is equal to '\0', all keys greater than // or equal to the key argument are watched. // If the range_end is one bit larger than the given key, // then all keys with the prefix (the given key) will be watched. 
bytes range_end = 2; + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". int64 start_revision = 3; + // progress_notify is set so that the etcd server will periodically send a WatchResponse with // no events to the new watcher if there are no recent events. It is useful when clients // wish to recover a disconnected watcher starting from a recent known revision. @@ -664,11 +691,12 @@ message WatchCreateRequest { bool progress_notify = 4; enum FilterType { - // filter out put event. - NOPUT = 0; - // filter out delete event. - NODELETE = 1; + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; } + // filters filter the events at server side before it sends back to the watcher. repeated FilterType filters = 5; @@ -701,14 +729,17 @@ message WatchResponse { ResponseHeader header = 1; // watch_id is the ID of the watcher that corresponds to the response. int64 watch_id = 2; + // created is set to true if the response is for a create watch request. // The client should record the watch_id and expect to receive events for // the created watcher from the same stream. // All events sent to the created watcher will attach with the same watch_id. bool created = 3; + // canceled is set to true if the response is for a cancel watch request. // No further events will be sent to the canceled watcher. bool canceled = 4; + // compact_revision is set to the minimum index if a watcher tries to watch // at a compacted index. // @@ -717,7 +748,7 @@ message WatchResponse { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - int64 compact_revision = 5; + int64 compact_revision = 5; // cancel_reason indicates the reason for canceling the watcher. string cancel_reason = 6; @@ -753,6 +784,22 @@ message LeaseRevokeResponse { ResponseHeader header = 1; } +message LeaseCheckpoint { + // ID is the lease ID to checkpoint. + int64 ID = 1; + + // Remaining_TTL is the remaining time until expiry of the lease. + int64 remaining_TTL = 2; +} + +message LeaseCheckpointRequest { + repeated LeaseCheckpoint checkpoints = 1; +} + +message LeaseCheckpointResponse { + ResponseHeader header = 1; +} + message LeaseKeepAliveRequest { // ID is the lease ID for the lease to keep alive. int64 ID = 1; @@ -807,11 +854,15 @@ message Member { repeated string peerURLs = 3; // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. repeated string clientURLs = 4; + // isLearner indicates if the member is raft learner. + bool isLearner = 5; } message MemberAddRequest { // peerURLs is the list of URLs the added member will use to communicate with the cluster. repeated string peerURLs = 1; + // isLearner indicates if the added member is raft learner. + bool isLearner = 2; } message MemberAddResponse { @@ -855,6 +906,17 @@ message MemberListResponse { repeated Member members = 2; } +message MemberPromoteRequest { + // ID is the member ID of the member to promote. + uint64 ID = 1; +} + +message MemberPromoteResponse { + ResponseHeader header = 1; + // members is a list of all members after promoting the member. + repeated Member members = 2; +} + message DefragmentRequest { } @@ -914,14 +976,22 @@ message StatusResponse { ResponseHeader header = 1; // version is the cluster protocol version used by the responding member. 
string version = 2; - // dbSize is the size of the backend database, in bytes, of the responding member. + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. int64 dbSize = 3; // leader is the member ID which the responding member believes is the current leader. uint64 leader = 4; - // raftIndex is the current raft index of the responding member. + // raftIndex is the current raft committed index of the responding member. uint64 raftIndex = 5; // raftTerm is the current raft term of the responding member. uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; } message AuthEnableRequest { @@ -938,6 +1008,7 @@ message AuthenticateRequest { message AuthUserAddRequest { string name = 1; string password = 2; + authpb.UserAddOptions options = 3; } message AuthUserGetRequest { @@ -996,8 +1067,8 @@ message AuthRoleGrantPermissionRequest { message AuthRoleRevokePermissionRequest { string role = 1; - string key = 2; - string range_end = 3; + bytes key = 2; + bytes range_end = 3; } message AuthEnableResponse { diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go similarity index 75% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go index 4679da5057f..23fe337a59b 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go @@ -1,16 +1,28 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: kv.proto +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ package mvccpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -35,7 +47,6 @@ var Event_EventType_name = map[int32]string{ 0: "PUT", 1: "DELETE", } - var Event_EventType_value = map[string]int32{ "PUT": 0, "DELETE": 1, @@ -44,10 +55,7 @@ var Event_EventType_value = map[string]int32{ func (x Event_EventType) String() string { return proto.EnumName(Event_EventType_name, int32(x)) } - -func (Event_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1, 0} -} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } type KeyValue struct { // key is the key in bytes. An empty key is not allowed. @@ -65,44 +73,13 @@ type KeyValue struct { // lease is the ID of the lease that attached to key. // When the attached lease expires, the key will be deleted. // If lease is 0, then no lease is attached to the key. 
- Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{0} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return m.Size() -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` } -var xxx_messageInfo_KeyValue proto.InternalMessageInfo +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } type Event struct { // type is the kind of event. If type is a PUT, it indicates @@ -114,82 +91,25 @@ type Event struct { // A PUT event with kv.Version=1 indicates the creation of a key. // A DELETE/EXPIRE event contains the deleted key with // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` // prev_kv holds the key-value pair before the event happens. 
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } -var xxx_messageInfo_Event proto.InternalMessageInfo +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } func init() { - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) } - -func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } - -var fileDescriptor_2216fe83c9c12408 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 
0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} - func (m *KeyValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -197,60 +117,49 @@ func (m *KeyValue) Marshal() (dAtA []byte, err error) { } func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Lease != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x30 + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x2a + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) } if m.Version != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - i-- dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) } - if m.ModRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - i-- - dAtA[i] = 0x18 - } - if m.CreateRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - i-- - dAtA[i] = 0x10 + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) } - return len(dAtA) - i, nil + return i, nil } func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -258,66 +167,48 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) } if m.Kv != nil { - { - size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - if m.Type != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 
+ if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - offset -= sovKv(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *KeyValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -340,16 +231,10 @@ func (m *KeyValue) Size() (n int) { if m.Lease != 0 { n += 1 + sovKv(uint64(m.Lease)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Event) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Type != 0 { @@ -363,14 +248,18 @@ func (m *Event) Size() (n int) { l = m.PrevKv.Size() n += 1 + l + sovKv(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovKv(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozKv(x uint64) (n int) { return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -390,7 +279,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -418,7 +307,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -427,9 +316,6 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -452,7 +338,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CreateRevision |= int64(b&0x7F) << shift + m.CreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -471,7 +357,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ModRevision |= int64(b&0x7F) << shift + m.ModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -490,7 +376,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Version |= int64(b&0x7F) << shift + m.Version |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -509,7 +395,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -518,9 +404,6 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -543,7 +426,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -557,13 +440,9 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthKv } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthKv - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -588,7 +467,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -616,7 +495,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= Event_EventType(b&0x7F) << shift + m.Type |= (Event_EventType(b) & 0x7F) << shift if b < 0x80 { break } @@ -635,7 +514,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -644,9 +523,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -671,7 +547,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -680,9 +556,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -702,13 +575,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthKv } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthKv - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -772,11 +641,8 @@ func skipKv(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthKv - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthKv } return iNdEx, nil @@ -807,9 +673,6 @@ func skipKv(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthKv - } } return iNdEx, nil case 4: @@ -828,3 +691,28 @@ var ( ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + +var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 
0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto rename to cluster-autoscaler/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/logger.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/logger.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go similarity index 97% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go index 378bee0e3c3..729cbdb57e4 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go @@ -27,7 +27,7 @@ var _ Logger = &packageLogger{} // For example: // // var defaultLogger Logger -// defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot") +// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot") // func NewPackageLogger(repo, pkg string) Logger { return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)} diff 
--git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap.go similarity index 88% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap.go index 2f692233aa8..8fc6e03b77b 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap.go @@ -53,15 +53,12 @@ var DefaultZapLoggerConfig = zap.Config{ ErrorOutputPaths: []string{"stderr"}, } -// AddOutputPaths adds output paths to the existing output paths, resolving conflicts. -func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config { +// MergeOutputPaths merges logging output paths, resolving conflicts. +func MergeOutputPaths(cfg zap.Config) zap.Config { outputs := make(map[string]struct{}) for _, v := range cfg.OutputPaths { outputs[v] = struct{}{} } - for _, v := range outputPaths { - outputs[v] = struct{}{} - } outputSlice := make([]string, 0) if _, ok := outputs["/dev/null"]; ok { // "/dev/null" to discard all @@ -78,9 +75,6 @@ func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap. for _, v := range cfg.ErrorOutputPaths { errOutputs[v] = struct{}{} } - for _, v := range errorOutputPaths { - errOutputs[v] = struct{}{} - } errOutputSlice := make([]string, 0) if _, ok := errOutputs["/dev/null"]; ok { // "/dev/null" to discard all diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go index b1788bc83f8..fcd39038107 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go @@ -24,7 +24,7 @@ import ( "os" "path/filepath" - "github.com/coreos/etcd/pkg/systemd" + "go.etcd.io/etcd/pkg/systemd" "github.com/coreos/go-systemd/journal" "go.uber.org/zap/zapcore" diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go index 012d688d2d9..f016b3054e3 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go @@ -17,7 +17,7 @@ package logutil import ( "errors" - "github.com/coreos/etcd/raft" + "go.etcd.io/etcd/raft" "go.uber.org/zap" "go.uber.org/zap/zapcore" diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/systemd/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/systemd/doc.go rename to 
cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/systemd/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/systemd/journal.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/systemd/journal.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/systemd/journal.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go index 79b1f632ed5..3a5aef089a7 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go @@ -41,6 +41,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { if err != nil { return nil, err } + certPool.AddCert(cert) } } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/listener.go similarity index 51% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/listener.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/listener.go index 48655063f6f..80e35bda59a 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/listener.go @@ -31,14 +31,17 @@ import ( "strings" "time" - "github.com/coreos/etcd/pkg/tlsutil" + "go.etcd.io/etcd/pkg/tlsutil" + + "go.uber.org/zap" ) +// NewListener creates a new listner. 
func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlsinfo, l) + return wrapTLS(scheme, tlsinfo, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -49,21 +52,24 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } + if tlsinfo != nil && tlsinfo.SkipClientSANVerify { + return NewTLSListener(l, tlsinfo) + } return newTLSListener(l, tlsinfo, checkSAN) } type TLSInfo struct { - CertFile string - KeyFile string - CAFile string // TODO: deprecate this in v4 - TrustedCAFile string - ClientCertAuth bool - CRLFile string - InsecureSkipVerify bool + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + SkipClientSANVerify bool // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string @@ -85,20 +91,33 @@ type TLSInfo struct { // AllowedCN is a CN which must be provided by a client. AllowedCN string + + // AllowedHostname is an IP address or hostname that must match the TLS + // certificate provided by a client. + AllowedHostname string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. + EmptyCN bool } func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) + return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) } func (info TLSInfo) Empty() bool { return info.CertFile == "" && info.KeyFile == "" } -func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { if err = os.MkdirAll(dirpath, 0700); err != nil { return } + info.Logger = lg certPath := filepath.Join(dirpath, "cert.pem") keyPath := filepath.Join(dirpath, "key.pem") @@ -114,6 +133,12 @@ func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + } return } @@ -124,7 +149,7 @@ func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { NotAfter: time.Now().Add(365 * (24 * time.Hour)), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), BasicConstraintsValid: true, } @@ -139,20 +164,40 @@ func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) 
if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + } return } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + } return } certOut, err := os.Create(certPath) if err != nil { + info.Logger.Warn( + "cannot cert file", + zap.String("path", certPath), + zap.Error(err), + ) return } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) certOut.Close() + if info.Logger != nil { + info.Logger.Info("created cert file", zap.String("path", certPath)) + } b, err := x509.MarshalECPrivateKey(priv) if err != nil { @@ -160,18 +205,50 @@ func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { } keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + } return } pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) keyOut.Close() - - return SelfCert(dirpath, hosts) + if info.Logger != nil { + info.Logger.Info("created key file", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts) } +// baseConfig is called on initial TLS handshake start. +// +// Previously, +// 1. Server has non-empty (*tls.Config).Certificates on client hello +// 2. Server calls (*tls.Config).GetCertificate iff: +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// +// When (*tls.Config).Certificates is always populated on initial handshake, +// client is expected to provide a valid matching SNI to pass the TLS +// verification, thus trigger server (*tls.Config).GetCertificate to reload +// TLS assets. However, a cert whose SAN field does not include domain names +// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus +// it was never able to trigger TLS reload on initial handshake; first +// ceritifcate object was being used, never being updated. +// +// Now, (*tls.Config).Certificates is created empty on initial TLS client +// handshake, in order to trigger (*tls.Config).GetCertificate and populate +// rest of the certificates on every new TLS connection, even when client +// SNI is empty (e.g. cert only includes IPs). func (info TLSInfo) baseConfig() (*tls.Config, error) { if info.KeyFile == "" || info.CertFile == "" { return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) } + if info.Logger == nil { + info.Logger = zap.NewNop() + } _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) if err != nil { @@ -187,26 +264,82 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { cfg.CipherSuites = info.CipherSuites } + // Client certificates may be verified by either an exact match on the CN, + // or a more general check of the CN and SANs. 
+ var verifyCertificate func(*x509.Certificate) bool if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { if len(chains) != 0 { - if info.AllowedCN == chains[0].Subject.CommonName { + if verifyCertificate(chains[0]) { return nil } } } - return errors.New("CommonName authentication failed") + return errors.New("client certificate authentication failed") } } // this only reloads certs when there's a client request // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err } return cfg, nil } @@ -214,9 +347,6 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // cafiles returns a list of CA file paths. 
func (info TLSInfo) cafiles() []string { cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } if info.TrustedCAFile != "" { cs = append(cs, info.TrustedCAFile) } @@ -231,13 +361,13 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) { } cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { + if info.TrustedCAFile != "" || info.ClientCertAuth { cfg.ClientAuth = tls.RequireAndVerifyClientCert } - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) if err != nil { return nil, err } @@ -265,9 +395,9 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { } cfg.InsecureSkipVerify = info.InsecureSkipVerify - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) if err != nil { return nil, err } @@ -276,6 +406,28 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if info.selfCert { cfg.InsecureSkipVerify = true } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + return cfg, nil } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go index b35e04955bb..273e99fe038 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go @@ -32,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd rdtimeoutd: 
rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { + if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/tls.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/tls.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/tls.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/transport.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/transport.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/transport.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/doc.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/doc.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/id.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/id.go index 1b042d9ce65..ae00388dde0 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/id.go @@ -14,9 +14,7 @@ package types -import ( - "strconv" -) +import "strconv" // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/set.go similarity index 89% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/set.go index c111b0c0c0b..e7a3cdc9ab6 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/set.go @@ -148,6 +148,14 @@ func (ts *tsafeSet) Contains(value string) (exists bool) { func (ts *tsafeSet) Equals(other Set) bool { ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } return ts.us.Equals(other) } @@ -173,6 +181,15 @@ func (ts *tsafeSet) Copy() Set { func (ts *tsafeSet) Sub(other Set) Set 
{ ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } usResult := ts.us.Sub(other).(*unsafeSet) return &tsafeSet{usResult, sync.RWMutex{}} } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/slice.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/slice.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/slice.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/slice.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urls.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/urls.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urls.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/urls.go diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/OWNERS b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/OWNERS new file mode 100644 index 00000000000..ab781066e23 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/OWNERS @@ -0,0 +1,19 @@ +approvers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +reviewers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +- tschottdorf diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/README.md b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/README.md similarity index 91% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/README.md rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/README.md index fde22b16519..83cf04035c9 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/README.md +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/README.md @@ -3,7 +3,7 @@ Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. The state machine is kept in sync through the use of a replicated log. For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more. @@ -13,7 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. 
The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. -A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/master/contrib/raftexample # Features @@ -21,7 +21,7 @@ This raft implementation is a full feature implementation of Raft protocol. Feat - Leader election - Log replication -- Log compaction +- Log compaction - Membership changes - Leadership transfer extension - Efficient linearizable read-only queries served by both the leader and followers @@ -40,13 +40,14 @@ This raft implementation also includes a few optional enhancements: - Batching log entries to reduce disk synchronized I/O - Writing to leader's disk in parallel - Internal proposal redirection from followers to leader -- Automatic stepping down when the leader loses quorum +- Automatic stepping down when the leader loses quorum +- Protection against unbounded log growth when quorum is lost ## Notable Users - [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database - [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database -- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store +- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. - [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks @@ -140,7 +141,7 @@ The total state machine handling loop will look something like this: case <-s.Ticker: n.Tick() case rd := <-s.Node.Ready(): - saveToStorage(rd.State, rd.Entries, rd.Snapshot) + saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) send(rd.Messages) if !raft.IsEmptySnap(rd.Snapshot) { processSnapshot(rd.Snapshot) @@ -166,7 +167,7 @@ To propose changes to the state machine from the node to take application data, n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -189,7 +190,7 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. 
The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. +This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/bootstrap.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/bootstrap.go new file mode 100644 index 00000000000..bd82b2041af --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/bootstrap.go @@ -0,0 +1,80 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Bootstrap initializes the RawNode for first use by appending configuration +// changes for the supplied peers. This method returns an error if the Storage +// is nonempty. +// +// It is recommended that instead of calling this method, applications bootstrap +// their state manually by setting up a Storage that has a first index > 1 and +// which stores the desired ConfState as its InitialState. +func (rn *RawNode) Bootstrap(peers []Peer) error { + if len(peers) == 0 { + return errors.New("must provide at least one peer to Bootstrap") + } + lastIndex, err := rn.raft.raftLog.storage.LastIndex() + if err != nil { + return err + } + + if lastIndex != 0 { + return errors.New("can't bootstrap a nonempty Storage") + } + + // We've faked out initial entries above, but nothing has been + // persisted. Start with an empty HardState (thus the first Ready will + // emit a HardState update for the app to persist). + rn.prevHardSt = emptyState + + // TODO(tbg): remove StartNode and give the application the right tools to + // bootstrap the initial membership in a cleaner way. 
+ rn.raft.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + return err + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + rn.raft.raftLog.append(ents...) + + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). + // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + // + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + rn.raft.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2()) + } + return nil +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/confchange.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/confchange.go new file mode 100644 index 00000000000..a0dc486df4f --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/confchange.go @@ -0,0 +1,425 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + "errors" + "fmt" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Changer facilitates configuration changes. It exposes methods to handle +// simple and joint consensus while performing the proper validation that allows +// refusing invalid configuration changes before they affect the active +// configuration. +type Changer struct { + Tracker tracker.ProgressTracker + LastIndex uint64 +} + +// EnterJoint verifies that the outgoing (=right) majority config of the joint +// config is empty and initializes it with a copy of the incoming (=left) +// majority config. That is, it transitions from +// +// (1 2 3)&&() +// to +// (1 2 3)&&(1 2 3). +// +// The supplied changes are then applied to the incoming majority config, +// resulting in a joint configuration that in terms of the Raft thesis[1] +// (Section 4.3) corresponds to `C_{new,old}`. 
+// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("config is already joint") + return c.err(err) + } + if len(incoming(cfg.Voters)) == 0 { + // We allow adding nodes to an empty config for convenience (testing and + // bootstrap), but you can't enter a joint state. + err := errors.New("can't make a zero-voter config joint") + return c.err(err) + } + // Clear the outgoing config. + *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{} + // Copy incoming to outgoing. + for id := range incoming(cfg.Voters) { + outgoing(cfg.Voters)[id] = struct{}{} + } + + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + cfg.AutoLeave = autoLeave + return checkAndReturn(cfg, prs) +} + +// LeaveJoint transitions out of a joint configuration. It is an error to call +// this method if the configuration is not joint, i.e. if the outgoing majority +// config Voters[1] is empty. +// +// The outgoing majority config of the joint configuration will be removed, +// that is, the incoming config is promoted as the sole decision maker. In the +// notation of the Raft thesis[1] (Section 4.3), this method transitions from +// `C_{new,old}` into `C_new`. +// +// At the same time, any staged learners (LearnersNext) the addition of which +// was held back by an overlapping voter in the former outgoing config will be +// inserted into Learners. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if !joint(cfg) { + err := errors.New("can't leave a non-joint config") + return c.err(err) + } + if len(outgoing(cfg.Voters)) == 0 { + err := fmt.Errorf("configuration is not joint: %v", cfg) + return c.err(err) + } + for id := range cfg.LearnersNext { + nilAwareAdd(&cfg.Learners, id) + prs[id].IsLearner = true + } + cfg.LearnersNext = nil + + for id := range outgoing(cfg.Voters) { + _, isVoter := incoming(cfg.Voters)[id] + _, isLearner := cfg.Learners[id] + + if !isVoter && !isLearner { + delete(prs, id) + } + } + *outgoingPtr(&cfg.Voters) = nil + cfg.AutoLeave = false + + return checkAndReturn(cfg, prs) +} + +// Simple carries out a series of configuration changes that (in aggregate) +// mutates the incoming majority config Voters[0] by at most one. This method +// will return an error if that is not the case, if the resulting quorum is +// zero, or if the configuration is in a joint state (i.e. if there is an +// outgoing configuration). 
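As a side note on the `(...)&&(...)` notation used in the comments above: it is the String form of quorum.JointConfig, with the incoming majority printed first and the outgoing majority second. A minimal sketch, not part of the vendored change and only assuming the vendored go.etcd.io/etcd/raft/quorum package is importable, that prints a simple and a joint configuration:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/quorum"
)

// set builds a MajorityConfig (a set of voter IDs) for the example.
func set(ids ...uint64) quorum.MajorityConfig {
	m := quorum.MajorityConfig{}
	for _, id := range ids {
		m[id] = struct{}{}
	}
	return m
}

func main() {
	// A non-joint config only uses the incoming half.
	simple := quorum.JointConfig{set(1, 2, 3), nil}
	// A joint config carries the outgoing majority in the second half.
	joint := quorum.JointConfig{set(2, 3, 4), set(1, 2, 3)}

	fmt.Println(simple) // (1 2 3)
	fmt.Println(joint)  // (2 3 4)&&(1 2 3)
}
```

Entering a joint state populates the second (outgoing) half and LeaveJoint clears it again, which is why the `&&(...)` suffix only appears while that half is non-empty.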
+func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("can't apply simple config change in joint config") + return c.err(err) + } + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { + return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config") + } + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, nil + } + + return checkAndReturn(cfg, prs) +} + +// apply a change to the configuration. By convention, changes to voters are +// always made to the incoming majority config Voters[0]. Voters[1] is either +// empty or preserves the outgoing majority configuration while in a joint state. +func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { + for _, cc := range ccs { + if cc.NodeID == 0 { + // etcd replaces the NodeID with zero if it decides (downstream of + // raft) to not apply a change, so we have to have explicit code + // here to ignore these. + continue + } + switch cc.Type { + case pb.ConfChangeAddNode: + c.makeVoter(cfg, prs, cc.NodeID) + case pb.ConfChangeAddLearnerNode: + c.makeLearner(cfg, prs, cc.NodeID) + case pb.ConfChangeRemoveNode: + c.remove(cfg, prs, cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + return fmt.Errorf("unexpected conf type %d", cc.Type) + } + } + if len(incoming(cfg.Voters)) == 0 { + return errors.New("removed all voters") + } + return nil +} + +// makeVoter adds or promotes the given ID to be a voter in the incoming +// majority config. +func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, false /* isLearner */) + return + } + + pr.IsLearner = false + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + incoming(cfg.Voters)[id] = struct{}{} + return +} + +// makeLearner makes the given ID a learner or stages it to be a learner once +// an active joint configuration is exited. +// +// The former happens when the peer is not a part of the outgoing config, in +// which case we either add a new learner or demote a voter in the incoming +// config. +// +// The latter case occurs when the configuration is joint and the peer is a +// voter in the outgoing config. In that case, we do not want to add the peer +// as a learner because then we'd have to track a peer as a voter and learner +// simultaneously. Instead, we add the learner to LearnersNext, so that it will +// be added to Learners the moment the outgoing config is removed by +// LeaveJoint(). +func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, true /* isLearner */) + return + } + if pr.IsLearner { + return + } + // Remove any existing voter in the incoming config... + c.remove(cfg, prs, id) + // ... but save the Progress. + prs[id] = pr + // Use LearnersNext if we can't add the learner to Learners directly, i.e. + // if the peer is still tracked as a voter in the outgoing config. It will + // be turned into a learner in LeaveJoint(). + // + // Otherwise, add a regular learner right away. 
+ if _, onRight := outgoing(cfg.Voters)[id]; onRight { + nilAwareAdd(&cfg.LearnersNext, id) + } else { + pr.IsLearner = true + nilAwareAdd(&cfg.Learners, id) + } +} + +// remove this peer as a voter or learner from the incoming config. +func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + if _, ok := prs[id]; !ok { + return + } + + delete(incoming(cfg.Voters), id) + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + + // If the peer is still a voter in the outgoing config, keep the Progress. + if _, onRight := outgoing(cfg.Voters)[id]; !onRight { + delete(prs, id) + } +} + +// initProgress initializes a new progress for the given node or learner. +func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { + if !isLearner { + incoming(cfg.Voters)[id] = struct{}{} + } else { + nilAwareAdd(&cfg.Learners, id) + } + prs[id] = &tracker.Progress{ + // Initializing the Progress with the last index means that the follower + // can be probed (with the last index). + // + // TODO(tbg): seems awfully optimistic. Using the first index would be + // better. The general expectation here is that the follower has no log + // at all (and will thus likely need a snapshot), though the app may + // have applied a snapshot out of band before adding the replica (thus + // making the first index the better choice). + Next: c.LastIndex, + Match: 0, + Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + IsLearner: isLearner, + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has had a chance to communicate with us. + RecentActive: true, + } +} + +// checkInvariants makes sure that the config and progress are compatible with +// each other. This is used to check both what the Changer is initialized with, +// as well as what it returns. +func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { + // NB: intentionally allow the empty config. In production we'll never see a + // non-empty config (we prevent it from being created) but we will need to + // be able to *create* an initial config, for example during bootstrap (or + // during tests). Instead of having to hand-code this, we allow + // transitioning from an empty config into any other legal and non-empty + // config. + for _, ids := range []map[uint64]struct{}{ + cfg.Voters.IDs(), + cfg.Learners, + cfg.LearnersNext, + } { + for id := range ids { + if _, ok := prs[id]; !ok { + return fmt.Errorf("no progress for %d", id) + } + } + } + + // Any staged learner was staged because it could not be directly added due + // to a conflicting voter in the outgoing config. + for id := range cfg.LearnersNext { + if _, ok := outgoing(cfg.Voters)[id]; !ok { + return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id) + } + if prs[id].IsLearner { + return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id) + } + } + // Conversely Learners and Voters doesn't intersect at all. + for id := range cfg.Learners { + if _, ok := outgoing(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[1]", id) + } + if _, ok := incoming(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[0]", id) + } + if !prs[id].IsLearner { + return fmt.Errorf("%d is in Learners, but is not marked as learner", id) + } + } + + if !joint(cfg) { + // We enforce that empty maps are nil instead of zero. 
+ if outgoing(cfg.Voters) != nil { + return fmt.Errorf("Voters[1] must be nil when not joint") + } + if cfg.LearnersNext != nil { + return fmt.Errorf("LearnersNext must be nil when not joint") + } + if cfg.AutoLeave { + return fmt.Errorf("AutoLeave must be false when not joint") + } + } + + return nil +} + +// checkAndCopy copies the tracker's config and progress map (deeply enough for +// the purposes of the Changer) and returns those copies. It returns an error +// if checkInvariants does. +func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { + cfg := c.Tracker.Config.Clone() + prs := tracker.ProgressMap{} + + for id, pr := range c.Tracker.Progress { + // A shallow copy is enough because we only mutate the Learner field. + ppr := *pr + prs[id] = &ppr + } + return checkAndReturn(cfg, prs) +} + +// checkAndReturn calls checkInvariants on the input and returns either the +// resulting error or the input. +func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, err + } + return cfg, prs, nil +} + +// err returns zero values and an error. +func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) { + return tracker.Config{}, nil, err +} + +// nilAwareAdd populates a map entry, creating the map if necessary. +func nilAwareAdd(m *map[uint64]struct{}, id uint64) { + if *m == nil { + *m = map[uint64]struct{}{} + } + (*m)[id] = struct{}{} +} + +// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after. +func nilAwareDelete(m *map[uint64]struct{}, id uint64) { + if *m == nil { + return + } + delete(*m, id) + if len(*m) == 0 { + *m = nil + } +} + +// symdiff returns the count of the symmetric difference between the sets of +// uint64s, i.e. len( (l - r) \union (r - l)). +func symdiff(l, r map[uint64]struct{}) int { + var n int + pairs := [][2]quorum.MajorityConfig{ + {l, r}, // count elems in l but not in r + {r, l}, // count elems in r but not in l + } + for _, p := range pairs { + for id := range p[0] { + if _, ok := p[1][id]; !ok { + n++ + } + } + } + return n +} + +func joint(cfg tracker.Config) bool { + return len(outgoing(cfg.Voters)) > 0 +} + +func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] } +func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] } +func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] } + +// Describe prints the type and NodeID of the configuration changes as a +// space-delimited string. +func Describe(ccs ...pb.ConfChangeSingle) string { + var buf strings.Builder + for _, cc := range ccs { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID) + } + return buf.String() +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/restore.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/restore.go new file mode 100644 index 00000000000..724068da00d --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/confchange/restore.go @@ -0,0 +1,155 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// toConfChangeSingle translates a conf state into 1) a slice of operations creating +// first the config that will become the outgoing one, and then the incoming one, and +// b) another slice that, when applied to the config resulted from 1), represents the +// ConfState. +func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) { + // Example to follow along this code: + // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4) + // + // This means that before entering the joint config, the configuration + // had voters (1 2 4) and perhaps some learners that are already gone. + // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6) + // are no longer voters; however 4 is poised to become a learner upon leaving + // the joint state. + // We can't tell whether 5 was a learner before entering the joint config, + // but it doesn't matter (we'll pretend that it wasn't). + // + // The code below will construct + // outgoing = add 1; add 2; add 4; add 6 + // incoming = remove 1; remove 2; remove 4; remove 6 + // add 1; add 2; add 3; + // add-learner 5; + // add-learner 4; + // + // So, when starting with an empty config, after applying 'outgoing' we have + // + // quorum=(1 2 4 6) + // + // From which we enter a joint state via 'incoming' + // + // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4) + // + // as desired. + + for _, id := range cs.VotersOutgoing { + // If there are outgoing voters, first add them one by one so that the + // (non-joint) config has them all. + out = append(out, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + + } + + // We're done constructing the outgoing slice, now on to the incoming one + // (which will apply on top of the config created by the outgoing slice). + + // First, we'll remove all of the outgoing voters. + for _, id := range cs.VotersOutgoing { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeRemoveNode, + NodeID: id, + }) + } + // Then we'll add the incoming voters and learners. + for _, id := range cs.Voters { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + for _, id := range cs.Learners { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + // Same for LearnersNext; these are nodes we want to be learners but which + // are currently voters in the outgoing config. 
+ for _, id := range cs.LearnersNext { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + return out, in +} + +func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { + for _, op := range ops { + cfg, prs, err := op(chg) + if err != nil { + return tracker.Config{}, nil, err + } + chg.Tracker.Config = cfg + chg.Tracker.Progress = prs + } + return chg.Tracker.Config, chg.Tracker.Progress, nil +} + +// Restore takes a Changer (which must represent an empty configuration), and +// runs a sequence of changes enacting the configuration described in the +// ConfState. +// +// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure +// the Changer only needs a ProgressMap (not a whole Tracker) at which point +// this can just take LastIndex and MaxInflight directly instead and cook up +// the results from that alone. +func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) { + outgoing, incoming := toConfChangeSingle(cs) + + var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error) + + if len(outgoing) == 0 { + // No outgoing config, so just apply the incoming changes one by one. + for _, cc := range incoming { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + } else { + // The ConfState describes a joint configuration. + // + // First, apply all of the changes of the outgoing config one by one, so + // that it temporarily becomes the incoming active config. For example, + // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&(). + for _, cc := range outgoing { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + // Now enter the joint state, which rotates the above additions into the + // outgoing config, and adds the incoming config in. Continuing the + // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations + // would be removing 2,3,4 and then adding in 1,2,3 while transitioning + // into a joint state. + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.EnterJoint(cs.AutoLeave, incoming...) + }) + } + + return chain(chg, ops...) +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/design.md b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/design.md similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/design.md rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/design.md diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/doc.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/doc.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/doc.go index b55c591ff5d..68fe6f0a6ed 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/doc.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/doc.go @@ -19,11 +19,11 @@ defined in the raftpb package. Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. The state machine is kept in sync through the use of a replicated log. 
For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample +https://github.com/etcd-io/etcd/tree/master/contrib/raftexample Usage @@ -87,7 +87,7 @@ large). Note: Marshalling messages is not thread-safe; it is important that you make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside +The easiest way to achieve this is to serialize the messages directly inside your main raft loop. 3. Apply Snapshot (if any) and CommittedEntries to the state machine. @@ -153,7 +153,7 @@ If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; you may have to re-propose after a timeout. -To add or remove node in a cluster, build ConfChange struct 'cc' and call: +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) @@ -172,7 +172,7 @@ may be reused. Node IDs must be non-zero. Implementation notes This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the @@ -260,7 +260,7 @@ stale log entries: 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election protocol. When Config.PreVote is true, a pre-election is carried out first (using the same rules as a regular election), and no node increases its term - number unless the pre-election indicates that the campaigining node would win. + number unless the pre-election indicates that the campaigning node would win. This minimizes disruption when a partitioned node rejoins the cluster. 'MsgSnap' requests to install a snapshot message. When a node has just diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log.go similarity index 92% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log.go index c3036d3c90d..77eedfccbad 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log.go @@ -18,7 +18,7 @@ import ( "fmt" "log" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) type raftLog struct { @@ -38,17 +38,29 @@ type raftLog struct { applied uint64 logger Logger + + // maxNextEntsSize is the maximum number aggregate byte size of the messages + // returned from calls to nextEnts. + maxNextEntsSize uint64 } -// newLog returns log using the given storage. It recovers the log to the state -// that it just commits and applies the latest snapshot. +// newLog returns log using the given storage and default options. It +// recovers the log to the state that it just commits and applies the +// latest snapshot. 
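The new maxNextEntsSize field caps the aggregate byte size of committed entries handed back by nextEnts (and therefore by a single Ready). A rough sketch of that batching rule, written here only for illustration — the field name and pb.Entry's generated Size method come from the vendored code, but the helper below is hypothetical:

```go
package sketch

import pb "go.etcd.io/etcd/raft/raftpb"

// limitBySize returns the longest prefix of ents whose aggregate Size stays
// within maxSize, but always keeps at least the first entry so progress is
// still made even when a single entry exceeds the limit.
func limitBySize(ents []pb.Entry, maxSize uint64) []pb.Entry {
	if len(ents) == 0 {
		return ents
	}
	size := uint64(ents[0].Size())
	limit := 1
	for ; limit < len(ents); limit++ {
		size += uint64(ents[limit].Size())
		if size > maxSize {
			break
		}
	}
	return ents[:limit]
}
```

With maxNextEntsSize left at noLimit (the default used by newLog above), the cap is effectively disabled.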
func newLog(storage Storage, logger Logger) *raftLog { + return newLogWithSize(storage, logger, noLimit) +} + +// newLogWithSize returns a log using the given storage and max +// message size. +func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog { if storage == nil { log.Panic("storage must not be nil") } log := &raftLog{ - storage: storage, - logger: logger, + storage: storage, + logger: logger, + maxNextEntsSize: maxNextEntsSize, } firstIndex, err := storage.FirstIndex() if err != nil { @@ -139,7 +151,7 @@ func (l *raftLog) unstableEntries() []pb.Entry { func (l *raftLog) nextEnts() (ents []pb.Entry) { off := max(l.applied+1, l.firstIndex()) if l.committed+1 > off { - ents, err := l.slice(off, l.committed+1, noLimit) + ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize) if err != nil { l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) } @@ -320,8 +332,10 @@ func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { if hi > l.unstable.offset { unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) if len(ents) > 0 { - ents = append([]pb.Entry{}, ents...) - ents = append(ents, unstable...) + combined := make([]pb.Entry, len(ents)+len(unstable)) + n := copy(combined, ents) + copy(combined[n:], unstable) + ents = combined } else { ents = unstable } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log_unstable.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log_unstable.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log_unstable.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log_unstable.go index 263af9ce405..1bff5a7bdcb 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/log_unstable.go @@ -14,7 +14,7 @@ package raft -import pb "github.com/coreos/etcd/raft/raftpb" +import pb "go.etcd.io/etcd/raft/raftpb" // unstable.entries[i] has raft log position i+unstable.offset. // Note that unstable.offset may be less than the highest log @@ -55,10 +55,7 @@ func (u *unstable) maybeLastIndex() (uint64, bool) { // is any. 
func (u *unstable) maybeTerm(i uint64) (uint64, bool) { if i < u.offset { - if u.snapshot == nil { - return 0, false - } - if u.snapshot.Metadata.Index == i { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { return u.snapshot.Metadata.Term, true } return 0, false @@ -71,6 +68,7 @@ func (u *unstable) maybeTerm(i uint64) (uint64, bool) { if i > last { return 0, false } + return u.entries[i-u.offset].Term, true } @@ -147,7 +145,7 @@ func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { return u.entries[lo-u.offset : hi-u.offset] } -// u.offset <= lo <= hi <= u.offset+len(u.offset) +// u.offset <= lo <= hi <= u.offset+len(u.entries) func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { if lo > hi { u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/logger.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/logger.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/logger.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/logger.go index 426a77d3445..6d89629650d 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/logger.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/logger.go @@ -19,6 +19,7 @@ import ( "io/ioutil" "log" "os" + "sync" ) type Logger interface { @@ -41,11 +42,16 @@ type Logger interface { Panicf(format string, v ...interface{}) } -func SetLogger(l Logger) { raftLogger = l } +func SetLogger(l Logger) { + raftLoggerMu.Lock() + raftLogger = l + raftLoggerMu.Unlock() +} var ( defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + raftLoggerMu sync.Mutex raftLogger = Logger(defaultLogger) ) diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/node.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/node.go similarity index 65% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/node.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/node.go index 33a9db84001..ab6185b99ec 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/node.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/node.go @@ -18,7 +18,7 @@ import ( "context" "errors" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) type SnapshotStatus int @@ -109,6 +109,19 @@ func (rd Ready) containsUpdates() bool { len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 } +// appliedCursor extracts from the Ready the highest index the client has +// applied (once the Ready is confirmed via Advance). If no information is +// contained in the Ready, returns zero. +func (rd Ready) appliedCursor() uint64 { + if n := len(rd.CommittedEntries); n > 0 { + return rd.CommittedEntries[n-1].Index + } + if index := rd.Snapshot.Metadata.Index; index > 0 { + return index + } + return 0 +} + // Node represents a node in a raft cluster. type Node interface { // Tick increments the internal logical clock for the Node by a single tick. Election @@ -116,12 +129,23 @@ type Node interface { Tick() // Campaign causes the Node to transition to candidate state and start campaigning to become leader. Campaign(ctx context.Context) error - // Propose proposes that data be appended to the log. + // Propose proposes that data be appended to the log. Note that proposals can be lost without + // notice, therefore it is user's job to ensure proposal retries. 
Propose(ctx context.Context, data []byte) error - // ProposeConfChange proposes config change. - // At most one ConfChange can be in the process of going through consensus. - // Application needs to call ApplyConfChange when applying EntryConfChange type entry. - ProposeConfChange(ctx context.Context, cc pb.ConfChange) error + // ProposeConfChange proposes a configuration change. Like any proposal, the + // configuration change may be dropped with or without an error being + // returned. In particular, configuration changes are dropped unless the + // leader has certainty that there is no prior unapplied configuration + // change in its log. + // + // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2 + // message. The latter allows arbitrary configuration changes via joint + // consensus, notably including replacing a voter. Passing a ConfChangeV2 + // message is only allowed if all Nodes participating in the cluster run a + // version of this library aware of the V2 API. See pb.ConfChangeV2 for + // usage details and semantics. + ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. Step(ctx context.Context, msg pb.Message) error @@ -142,11 +166,13 @@ type Node interface { // a long time to apply the snapshot data. To continue receiving Ready without blocking raft // progress, it can call Advance before finishing applying the last ready. Advance() - // ApplyConfChange applies config change to the local node. - // Returns an opaque ConfState protobuf which must be recorded - // in snapshots. Will never return nil; it returns a pointer only - // to match MemoryStorage.Compact. - ApplyConfChange(cc pb.ConfChange) *pb.ConfState + // ApplyConfChange applies a config change (previously passed to + // ProposeConfChange) to the node. This must be called whenever a config + // change is observed in Ready.CommittedEntries. + // + // Returns an opaque non-nil ConfState protobuf which must be recorded in + // snapshots. + ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState // TransferLeadership attempts to transfer leadership to the given transferee. TransferLeadership(ctx context.Context, lead, transferee uint64) @@ -161,7 +187,16 @@ type Node interface { Status() Status // ReportUnreachable reports the given node is not reachable for the last send. ReportUnreachable(id uint64) - // ReportSnapshot reports the status of the sent snapshot. + // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower + // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure. + // Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a + // snapshot (for e.g., while streaming it from leader to follower), should be reported to the + // leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft + // log probes until the follower can apply the snapshot and advance its state. If the follower + // can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any + // updates from the leader. Therefore, it is crucial that the application ensures that any + // failure in snapshot sending is caught and reported back to the leader; so it can resume raft + // log probing in the follower. ReportSnapshot(id uint64, status SnapshotStatus) // Stop performs any necessary termination of the Node. 
Stop() @@ -174,40 +209,21 @@ type Peer struct { // StartNode returns a new Node given configuration and a list of raft peers. // It appends a ConfChangeAddNode entry for each given peer to the initial log. +// +// Peers must not be zero length; call RestartNode in that case. func StartNode(c *Config, peers []Peer) Node { - r := newRaft(c) - // become the follower at term 1 and apply initial configuration - // entries of term 1 - r.becomeFollower(1, None) - for _, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - d, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} - r.raftLog.append(e) + if len(peers) == 0 { + panic("no peers given; use RestartNode instead") } - // Mark these initial entries as committed. - // TODO(bdarnell): These entries are still unstable; do we need to preserve - // the invariant that committed < unstable? - r.raftLog.committed = r.raftLog.lastIndex() - // Now apply them, mainly so that the application can call Campaign - // immediately after StartNode in tests. Note that these nodes will - // be added to raft twice: here and when the application's Ready - // loop calls ApplyConfChange. The calls to addNode must come after - // all calls to raftLog.append so progress.next is set after these - // bootstrapping entries (it is an error if we try to append these - // entries since they have already been committed). - // We do not set raftLog.applied so the application will be able - // to observe all conf changes via Ready.CommittedEntries. - for _, peer := range peers { - r.addNode(peer.ID) + rn, err := NewRawNode(c) + if err != nil { + panic(err) } + rn.Bootstrap(peers) - n := newNode() - n.logger = c.Logger - go n.run(r) + n := newNode(rn) + + go n.run() return &n } @@ -216,19 +232,25 @@ func StartNode(c *Config, peers []Peer) Node { // If the caller has an existing state machine, pass in the last log index that // has been applied to it; otherwise use zero. 
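For context on the StartNode/RestartNode split above, here is a minimal startup sketch; it assumes a MemoryStorage-backed node and the vendored go.etcd.io/etcd/raft import path, and the IDs and tuning values are placeholders. A node with empty storage is bootstrapped through StartNode with its initial peers, while a node that already has persisted state must use RestartNode so the bootstrap entries are not appended again.

```go
package main

import "go.etcd.io/etcd/raft"

func main() {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              0x01,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}

	var n raft.Node
	if last, _ := storage.LastIndex(); last == 0 {
		// Empty storage: first boot, so hand StartNode the initial membership.
		n = raft.StartNode(c, []raft.Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
	} else {
		// Persisted HardState, entries and snapshot were already replayed into
		// storage; membership is recovered from them, so no peer list is passed.
		n = raft.RestartNode(c)
	}
	n.Stop()
}
```

In the restart path the cluster membership comes from the conf-change entries and snapshot already in storage, which is why RestartNode takes no peer list.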
func RestartNode(c *Config) Node { - r := newRaft(c) - - n := newNode() - n.logger = c.Logger - go n.run(r) + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + n := newNode(rn) + go n.run() return &n } +type msgWithResult struct { + m pb.Message + result chan error +} + // node is the canonical implementation of the Node interface type node struct { - propc chan pb.Message + propc chan msgWithResult recvc chan pb.Message - confc chan pb.ConfChange + confc chan pb.ConfChangeV2 confstatec chan pb.ConfState readyc chan Ready advancec chan struct{} @@ -237,14 +259,14 @@ type node struct { stop chan struct{} status chan chan Status - logger Logger + rn *RawNode } -func newNode() node { +func newNode(rn *RawNode) node { return node{ - propc: make(chan pb.Message), + propc: make(chan msgWithResult), recvc: make(chan pb.Message), - confc: make(chan pb.ConfChange), + confc: make(chan pb.ConfChangeV2), confstatec: make(chan pb.ConfState), readyc: make(chan Ready), advancec: make(chan struct{}), @@ -255,6 +277,7 @@ func newNode() node { done: make(chan struct{}), stop: make(chan struct{}), status: make(chan chan Status), + rn: rn, } } @@ -270,29 +293,30 @@ func (n *node) Stop() { <-n.done } -func (n *node) run(r *raft) { - var propc chan pb.Message +func (n *node) run() { + var propc chan msgWithResult var readyc chan Ready var advancec chan struct{} - var prevLastUnstablei, prevLastUnstablet uint64 - var havePrevLastUnstablei bool - var prevSnapi uint64 var rd Ready + r := n.rn.raft + lead := None - prevSoftSt := r.softState() - prevHardSt := emptyState for { if advancec != nil { readyc = nil - } else { - rd = newReady(r, prevSoftSt, prevHardSt) - if rd.containsUpdates() { - readyc = n.readyc - } else { - readyc = nil - } + } else if n.rn.HasReady() { + // Populate a Ready. Note that this Ready is not guaranteed to + // actually be handled. We will arm readyc, but there's no guarantee + // that we will actually send on it. It's possible that we will + // service another channel instead, loop around, and then populate + // the Ready again. We could instead force the previous Ready to be + // handled first, but it's generally good to emit larger Readys plus + // it simplifies testing (by emitting less frequently and more + // predictably). + rd = n.rn.readyWithoutAccept() + readyc = n.readyc } if lead != r.lead { @@ -314,74 +338,56 @@ func (n *node) run(r *raft) { // TODO: maybe buffer the config propose if there exists one (the way // described in raft dissertation) // Currently it is dropped in Step silently. - case m := <-propc: + case pm := <-propc: + m := pm.m m.From = r.id - r.Step(m) + err := r.Step(m) + if pm.result != nil { + pm.result <- err + close(pm.result) + } case m := <-n.recvc: // filter out response message from unknown From. - if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { - r.Step(m) // raft never returns an error + if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + r.Step(m) } case cc := <-n.confc: - if cc.NodeID == None { - r.resetPendingConf() - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: + _, okBefore := r.prs.Progress[r.id] + cs := r.applyConfChange(cc) + // If the node was removed, block incoming proposals. Note that we + // only do this if the node was in the config before. Nodes may be + // a member of the group without knowing this (when they're catching + // up on the log and don't have the latest config) and we don't want + // to block the proposal channel in that case. 
+ // + // NB: propc is reset when the leader changes, which, if we learn + // about it, sort of implies that we got readded, maybe? This isn't + // very sound and likely has bugs. + if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter { + var found bool + for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} { + for _, id := range sl { + if id == r.id { + found = true + } + } } - break - } - switch cc.Type { - case pb.ConfChangeAddNode: - r.addNode(cc.NodeID) - case pb.ConfChangeAddLearnerNode: - r.addLearner(cc.NodeID) - case pb.ConfChangeRemoveNode: - // block incoming proposal when local node is - // removed - if cc.NodeID == r.id { + if !found { propc = nil } - r.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - r.resetPendingConf() - default: - panic("unexpected conf type") } select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case n.confstatec <- cs: case <-n.done: } case <-n.tickc: - r.tick() + n.rn.Tick() case readyc <- rd: - if rd.SoftState != nil { - prevSoftSt = rd.SoftState - } - if len(rd.Entries) > 0 { - prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index - prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term - havePrevLastUnstablei = true - } - if !IsEmptyHardState(rd.HardState) { - prevHardSt = rd.HardState - } - if !IsEmptySnap(rd.Snapshot) { - prevSnapi = rd.Snapshot.Metadata.Index - } - - r.msgs = nil - r.readStates = nil + n.rn.acceptReady(rd) advancec = n.advancec case <-advancec: - if prevHardSt.Commit != 0 { - r.raftLog.appliedTo(prevHardSt.Commit) - } - if havePrevLastUnstablei { - r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) - havePrevLastUnstablei = false - } - r.raftLog.stableSnapTo(prevSnapi) + n.rn.Advance(rd) + rd = Ready{} advancec = nil case c := <-n.status: c <- getStatus(r) @@ -399,14 +405,14 @@ func (n *node) Tick() { case n.tickc <- struct{}{}: case <-n.done: default: - n.logger.Warningf("A tick missed to fire. Node blocks too long!") + n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead) } } func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } func (n *node) Propose(ctx context.Context, data []byte) error { - return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) + return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) } func (n *node) Step(ctx context.Context, m pb.Message) error { @@ -418,30 +424,69 @@ func (n *node) Step(ctx context.Context, m pb.Message) error { return n.step(ctx, m) } -func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { - data, err := cc.Marshal() +func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) { + typ, data, err := pb.MarshalConfChange(c) + if err != nil { + return pb.Message{}, err + } + return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error { + msg, err := confChangeToMsg(cc) if err != nil { return err } - return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) + return n.Step(ctx, msg) +} + +func (n *node) step(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, false) +} + +func (n *node) stepWait(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, true) } // Step advances the state machine using msgs. 
The ctx.Err() will be returned, // if any. -func (n *node) step(ctx context.Context, m pb.Message) error { - ch := n.recvc - if m.Type == pb.MsgProp { - ch = n.propc +func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error { + if m.Type != pb.MsgProp { + select { + case n.recvc <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + } + ch := n.propc + pm := msgWithResult{m: m} + if wait { + pm.result = make(chan error, 1) } - select { - case ch <- m: - return nil + case ch <- pm: + if !wait { + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + select { + case err := <-pm.result: + if err != nil { + return err + } case <-ctx.Done(): return ctx.Err() case <-n.done: return ErrStopped } + return nil } func (n *node) Ready() <-chan Ready { return n.readyc } @@ -453,10 +498,10 @@ func (n *node) Advance() { } } -func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { +func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { var cs pb.ConfState select { - case n.confc <- cc: + case n.confc <- cc.AsV2(): case <-n.done: } select { @@ -523,7 +568,7 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } - rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) + rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries)) return rd } diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/joint.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/joint.go new file mode 100644 index 00000000000..e3741e0b0a9 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/joint.go @@ -0,0 +1,75 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +// JointConfig is a configuration of two groups of (possibly overlapping) +// majority configurations. Decisions require the support of both majorities. +type JointConfig [2]MajorityConfig + +func (c JointConfig) String() string { + if len(c[1]) > 0 { + return c[0].String() + "&&" + c[1].String() + } + return c[0].String() +} + +// IDs returns a newly initialized map representing the set of voters present +// in the joint configuration. +func (c JointConfig) IDs() map[uint64]struct{} { + m := map[uint64]struct{}{} + for _, cc := range c { + for id := range cc { + m[id] = struct{}{} + } + } + return m +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c JointConfig) Describe(l AckedIndexer) string { + return MajorityConfig(c.IDs()).Describe(l) +} + +// CommittedIndex returns the largest committed index for the given joint +// quorum. An index is jointly committed if it is committed in both constituent +// majorities. 
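To make the "committed in both constituent majorities" rule concrete, a small illustration follows; it is not taken from the vendored code — the toy AckedIndexer implementation and the example indexes are made up — and it shows that the joint commit index is the minimum of the two majority-committed indexes.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/quorum"
)

// ackedMap is a toy AckedIndexer: it reports the index each voter has
// acknowledged, or "not found" for voters that have not reported yet.
type ackedMap map[uint64]quorum.Index

func (m ackedMap) AckedIndex(id uint64) (quorum.Index, bool) {
	idx, ok := m[id]
	return idx, ok
}

func main() {
	// Joint config (1 2 3)&&(3 4 5) with per-voter acked indexes.
	jc := quorum.JointConfig{
		{1: {}, 2: {}, 3: {}},
		{3: {}, 4: {}, 5: {}},
	}
	acked := ackedMap{1: 9, 2: 9, 3: 5, 4: 4, 5: 4}

	// Incoming majority (1 2 3) has a quorum at index 9, the outgoing majority
	// (3 4 5) only at index 4; the joint configuration commits the smaller one.
	fmt.Println(jc.CommittedIndex(acked)) // 4
}
```

Only once the outgoing majority catches up past index 4 does the joint commit index advance toward what the incoming majority alone could commit.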
+func (c JointConfig) CommittedIndex(l AckedIndexer) Index { + idx0 := c[0].CommittedIndex(l) + idx1 := c[1].CommittedIndex(l) + if idx0 < idx1 { + return idx0 + } + return idx1 +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending, lost, or won. A joint quorum +// requires both majority quorums to vote in favor. +func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult { + r1 := c[0].VoteResult(votes) + r2 := c[1].VoteResult(votes) + + if r1 == r2 { + // If they agree, return the agreed state. + return r1 + } + if r1 == VoteLost || r2 == VoteLost { + // If either config has lost, loss is the only possible outcome. + return VoteLost + } + // One side won, the other one is pending, so the whole outcome is. + return VotePending +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/majority.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/majority.go new file mode 100644 index 00000000000..8858a36b634 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/majority.go @@ -0,0 +1,210 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "fmt" + "math" + "sort" + "strings" +) + +// MajorityConfig is a set of IDs that uses majority quorums to make decisions. +type MajorityConfig map[uint64]struct{} + +func (c MajorityConfig) String() string { + sl := make([]uint64, 0, len(c)) + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + var buf strings.Builder + buf.WriteByte('(') + for i := range sl { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprint(&buf, sl[i]) + } + buf.WriteByte(')') + return buf.String() +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c MajorityConfig) Describe(l AckedIndexer) string { + if len(c) == 0 { + return "" + } + type tup struct { + id uint64 + idx Index + ok bool // idx found? + bar int // length of bar displayed for this tup + } + + // Below, populate .bar so that the i-th largest commit index has bar i (we + // plot this as sort of a progress bar). The actual code is a bit more + // complicated and also makes sure that equal index => equal bar. + + n := len(c) + info := make([]tup, 0, n) + for id := range c { + idx, ok := l.AckedIndex(id) + info = append(info, tup{id: id, idx: idx, ok: ok}) + } + + // Sort by index + sort.Slice(info, func(i, j int) bool { + if info[i].idx == info[j].idx { + return info[i].id < info[j].id + } + return info[i].idx < info[j].idx + }) + + // Populate .bar. + for i := range info { + if i > 0 && info[i-1].idx < info[i].idx { + info[i].bar = i + } + } + + // Sort by ID. + sort.Slice(info, func(i, j int) bool { + return info[i].id < info[j].id + }) + + var buf strings.Builder + + // Print. 
+ fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n") + for i := range info { + bar := info[i].bar + if !info[i].ok { + fmt.Fprint(&buf, "?"+strings.Repeat(" ", n)) + } else { + fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar)) + } + fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id) + } + return buf.String() +} + +// Slice returns the MajorityConfig as a sorted slice. +func (c MajorityConfig) Slice() []uint64 { + var sl []uint64 + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + return sl +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// CommittedIndex computes the committed index from those supplied via the +// provided AckedIndexer (for the active config). +func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { + n := len(c) + if n == 0 { + // This plays well with joint quorums which, when one half is the zero + // MajorityConfig, should behave like the other half. + return math.MaxUint64 + } + + // Use an on-stack slice to collect the committed indexes when n <= 7 + // (otherwise we alloc). The alternative is to stash a slice on + // MajorityConfig, but this impairs usability (as is, MajorityConfig is just + // a map, and that's nice). The assumption is that running with a + // replication factor of >7 is rare, and in cases in which it happens + // performance is a lesser concern (additionally the performance + // implications of an allocation here are far from drastic). + var stk [7]uint64 + var srt []uint64 + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]uint64, n) + } + + { + // Fill the slice with the indexes observed. Any unused slots will be + // left as zero; these correspond to voters that may report in, but + // haven't yet. We fill from the right (since the zeroes will end up on + // the left after sorting below anyway). + i := n - 1 + for id := range c { + if idx, ok := l.AckedIndex(id); ok { + srt[i] = uint64(idx) + i-- + } + } + } + + // Sort by index. Use a bespoke algorithm (copied from the stdlib's sort + // package) to keep srt on the stack. + insertionSort(srt) + + // The smallest index into the array for which the value is acked by a + // quorum. In other words, from the end of the slice, move n/2+1 to the + // left (accounting for zero-indexing). + pos := n - (n/2 + 1) + return Index(srt[pos]) +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending (i.e. neither a quorum of +// yes/no has been reached), won (a quorum of yes has been reached), or lost (a +// quorum of no has been reached). +func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult { + if len(c) == 0 { + // By convention, the elections on an empty config win. This comes in + // handy with joint quorums because it'll make a half-populated joint + // quorum behave like a majority quorum. 
+ return VoteWon + } + + ny := [2]int{} // vote counts for no and yes, respectively + + var missing int + for id := range c { + v, ok := votes[id] + if !ok { + missing++ + continue + } + if v { + ny[1]++ + } else { + ny[0]++ + } + } + + q := len(c)/2 + 1 + if ny[1] >= q { + return VoteWon + } + if ny[1]+missing >= q { + return VotePending + } + return VoteLost +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/quorum.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/quorum.go new file mode 100644 index 00000000000..2899e46c96d --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/quorum.go @@ -0,0 +1,58 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "math" + "strconv" +) + +// Index is a Raft log position. +type Index uint64 + +func (i Index) String() string { + if i == math.MaxUint64 { + return "∞" + } + return strconv.FormatUint(uint64(i), 10) +} + +// AckedIndexer allows looking up a commit index for a given ID of a voter +// from a corresponding MajorityConfig. +type AckedIndexer interface { + AckedIndex(voterID uint64) (idx Index, found bool) +} + +type mapAckIndexer map[uint64]Index + +func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) { + idx, ok := m[id] + return idx, ok +} + +// VoteResult indicates the outcome of a vote. +// +//go:generate stringer -type=VoteResult +type VoteResult uint8 + +const ( + // VotePending indicates that the decision of the vote depends on future + // votes, i.e. neither "yes" or "no" has reached quorum yet. + VotePending VoteResult = 1 + iota + // VoteLost indicates that the quorum has voted "no". + VoteLost + // VoteWon indicates that the quorum has voted "yes". + VoteWon +) diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go new file mode 100644 index 00000000000..9eca8fd0c96 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=VoteResult"; DO NOT EDIT. + +package quorum + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[VotePending-1] + _ = x[VoteLost-2] + _ = x[VoteWon-3] +} + +const _VoteResult_name = "VotePendingVoteLostVoteWon" + +var _VoteResult_index = [...]uint8{0, 11, 19, 26} + +func (i VoteResult) String() string { + i -= 1 + if i >= VoteResult(len(_VoteResult_index)-1) { + return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]] +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raft.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raft.go similarity index 59% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raft.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raft.go index 22ff138e9c4..d3c3f42574b 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raft.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raft.go @@ -25,7 +25,10 @@ import ( "sync" "time" - pb "github.com/coreos/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/confchange" + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) // None is a placeholder node ID used when there is no leader. @@ -67,9 +70,13 @@ const ( campaignTransfer CampaignType = "CampaignTransfer" ) +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + // lockedRand is a small wrapper around rand.Rand to provide -// synchronization. Only the methods needed by the code are exposed -// (e.g. Intn). +// synchronization among multiple raft groups. Only the methods needed +// by the code are exposed (e.g. Intn). type lockedRand struct { mu sync.Mutex rand *rand.Rand @@ -116,8 +123,9 @@ type Config struct { // used for testing right now. peers []uint64 - // learners contains the IDs of all leaner nodes (including self if the local node is a leaner) in the raft cluster. - // learners only receives entries from the leader node. It does not vote or promote itself. + // learners contains the IDs of all learner nodes (including self if the + // local node is a learner) in the raft cluster. learners only receives + // entries from the leader node. It does not vote or promote itself. learners []uint64 // ElectionTick is the number of Node.Tick invocations that must pass between @@ -143,12 +151,20 @@ type Config struct { // applied entries. This is a very application dependent configuration. Applied uint64 - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. + // MaxSizePerMsg limits the max byte size of each append message. Smaller + // value lowers the raft recovery cost(initial probing and message lost + // during normal operation). On the other side, it might affect the + // throughput during normal replication. Note: math.MaxUint64 for unlimited, + // 0 for at most one entry per message. MaxSizePerMsg uint64 + // MaxCommittedSizePerReady limits the size of the committed entries which + // can be applied. + MaxCommittedSizePerReady uint64 + // MaxUncommittedEntriesSize limits the aggregate byte size of the + // uncommitted entries that may be appended to a leader's log. 
Once this + // limit is exceeded, proposals will begin to return ErrProposalDropped + // errors. Note: 0 for no limit. + MaxUncommittedEntriesSize uint64 // MaxInflightMsgs limits the max number of in-flight append messages during // optimistic replication phase. The application transportation layer usually // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid @@ -187,7 +203,7 @@ type Config struct { // this feature would be in a situation where the Raft leader is used to // compute the data of a proposal, for example, adding a timestamp from a // hybrid logical clock to data in a monotonically increasing way. Forwarding - // should be disabled to prevent a follower with an innaccurate hybrid + // should be disabled to prevent a follower with an inaccurate hybrid // logical clock from assigning the timestamp and then forwarding the data // to the leader. DisableProposalForwarding bool @@ -210,6 +226,16 @@ func (c *Config) validate() error { return errors.New("storage cannot be nil") } + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + + // default MaxCommittedSizePerReady to MaxSizePerMsg because they were + // previously the same parameter. + if c.MaxCommittedSizePerReady == 0 { + c.MaxCommittedSizePerReady = c.MaxSizePerMsg + } + if c.MaxInflightMsgs <= 0 { return errors.New("max inflight messages must be greater than 0") } @@ -236,18 +262,16 @@ type raft struct { // the log raftLog *raftLog - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - learnerPrs map[uint64]*Progress + maxMsgSize uint64 + maxUncommittedSize uint64 + // TODO(tbg): rename to trk. + prs tracker.ProgressTracker state StateType // isLearner is true if the local raft node is a learner. isLearner bool - votes map[uint64]bool - msgs []pb.Message // the leader id @@ -255,8 +279,17 @@ type raft struct { // leadTransferee is id of the leader transfer target when its value is not zero. // Follow the procedure defined in raft thesis 3.10. leadTransferee uint64 - // New configuration is ignored if there exists unapplied configuration. - pendingConf bool + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via pendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + pendingConfIndex uint64 + // an estimate of the size of the uncommitted tail of the Raft log. Used to + // prevent unbounded log growth. Only maintained by the leader. Reset on + // term changes. + uncommittedSize uint64 readOnly *readOnly @@ -291,32 +324,31 @@ func newRaft(c *Config) *raft { if err := c.validate(); err != nil { panic(err.Error()) } - raftlog := newLog(c.Storage, c.Logger) + raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) hs, cs, err := c.Storage.InitialState() if err != nil { panic(err) // TODO(bdarnell) } - peers := c.peers - learners := c.learners - if len(cs.Nodes) > 0 || len(cs.Learners) > 0 { - if len(peers) > 0 || len(learners) > 0 { + + if len(c.peers) > 0 || len(c.learners) > 0 { + if len(cs.Voters) > 0 || len(cs.Learners) > 0 { // TODO(bdarnell): the peers argument is always nil except in // tests; the argument should be removed and these tests should be // updated to specify their nodes through a snapshot. 
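The two new limits above feed directly into the ErrProposalDropped behavior: once the uncommitted tail of the leader's log exceeds MaxUncommittedEntriesSize, further proposals are rejected rather than buffered. A rough sketch of how a caller might size the limits and react to the new error; the helper names and chosen sizes are illustrative assumptions, not prescribed values.

package proposal

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/raft"
)

// newConfig caps the uncommitted tail so a burst of proposals cannot grow the
// leader's log without bound.
func newConfig(id uint64, storage *raft.MemoryStorage) *raft.Config {
	return &raft.Config{
		ID:                        id,
		ElectionTick:              10,
		HeartbeatTick:             1,
		Storage:                   storage,
		MaxSizePerMsg:             1 << 20, // 1 MiB per append message
		MaxInflightMsgs:           256,
		MaxUncommittedEntriesSize: 1 << 30, // ~1 GiB of uncommitted entries
	}
}

// propose surfaces ErrProposalDropped so the caller can back off and retry
// instead of assuming the proposal was accepted.
func propose(n raft.Node, data []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := n.Propose(ctx, data)
	if err == raft.ErrProposalDropped {
		log.Printf("proposal dropped (uncommitted log over limit or no known leader); retry later")
	}
	return err
}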
- panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)") + panic("cannot specify both newRaft(peers, learners) and ConfState.(Voters, Learners)") } - peers = cs.Nodes - learners = cs.Learners + cs.Voters = c.peers + cs.Learners = c.learners } + r := &raft{ id: c.ID, lead: None, isLearner: false, raftLog: raftlog, maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - learnerPrs: make(map[uint64]*Progress), + maxUncommittedSize: c.MaxUncommittedEntriesSize, + prs: tracker.MakeProgressTracker(c.MaxInflightMsgs), electionTimeout: c.ElectionTick, heartbeatTimeout: c.HeartbeatTick, logger: c.Logger, @@ -325,20 +357,17 @@ func newRaft(c *Config) *raft { readOnly: newReadOnly(c.ReadOnlyOption), disableProposalForwarding: c.DisableProposalForwarding, } - for _, p := range peers { - r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} - } - for _, p := range learners { - if _, ok := r.prs[p]; ok { - panic(fmt.Sprintf("node %x is in both learner and peer list", p)) - } - r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true} - if r.id == p { - r.isLearner = true - } + + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: raftlog.lastIndex(), + }, cs) + if err != nil { + panic(err) } + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) - if !isHardStateEqual(hs, emptyState) { + if !IsEmptyHardState(hs) { r.loadState(hs) } if c.Applied > 0 { @@ -347,7 +376,7 @@ func newRaft(c *Config) *raft { r.becomeFollower(r.Term, None) var nodesStrs []string - for _, n := range r.nodes() { + for _, n := range r.prs.VoterNodes() { nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) } @@ -368,20 +397,6 @@ func (r *raft) hardState() pb.HardState { } } -func (r *raft) quorum() int { return len(r.prs)/2 + 1 } - -func (r *raft) nodes() []uint64 { - nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs)) - for id := range r.prs { - nodes = append(nodes, id) - } - for id := range r.learnerPrs { - nodes = append(nodes, id) - } - sort.Sort(uint64Slice(nodes)) - return nodes -} - // send persists state to stable storage and then sends to its mailbox. func (r *raft) send(m pb.Message) { m.From = r.id @@ -416,30 +431,35 @@ func (r *raft) send(m pb.Message) { r.msgs = append(r.msgs, m) } -func (r *raft) getProgress(id uint64) *Progress { - if pr, ok := r.prs[id]; ok { - return pr - } - - return r.learnerPrs[id] +// sendAppend sends an append RPC with new entries (if any) and the +// current commit index to the given peer. +func (r *raft) sendAppend(to uint64) { + r.maybeSendAppend(to, true) } -// sendAppend sends RPC, with entries to the given peer. -func (r *raft) sendAppend(to uint64) { - pr := r.getProgress(to) +// maybeSendAppend sends an append RPC with new entries to the given peer, +// if necessary. Returns true if a message was sent. The sendIfEmpty +// argument controls whether messages with no entries will be sent +// ("empty" messages are useful to convey updated Commit indexes, but +// are undesirable when we're sending multiple messages in a batch). 
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { + pr := r.prs.Progress[to] if pr.IsPaused() { - return + return false } m := pb.Message{} m.To = to term, errt := r.raftLog.term(pr.Next - 1) ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + if len(ents) == 0 && !sendIfEmpty { + return false + } if errt != nil || erre != nil { // send snapshot if we failed to get term or entries if !pr.RecentActive { r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) - return + return false } m.Type = pb.MsgSnap @@ -447,7 +467,7 @@ func (r *raft) sendAppend(to uint64) { if err != nil { if err == ErrSnapshotTemporarilyUnavailable { r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) - return + return false } panic(err) // TODO(bdarnell) } @@ -458,7 +478,7 @@ func (r *raft) sendAppend(to uint64) { sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) - pr.becomeSnapshot(sindex) + pr.BecomeSnapshot(sindex) r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) } else { m.Type = pb.MsgApp @@ -468,22 +488,23 @@ func (r *raft) sendAppend(to uint64) { m.Commit = r.raftLog.committed if n := len(m.Entries); n != 0 { switch pr.State { - // optimistically increase the next when in ProgressStateReplicate - case ProgressStateReplicate: + // optimistically increase the next when in StateReplicate + case tracker.StateReplicate: last := m.Entries[n-1].Index - pr.optimisticUpdate(last) - pr.ins.add(last) - case ProgressStateProbe: - pr.pause() + pr.OptimisticUpdate(last) + pr.Inflights.Add(last) + case tracker.StateProbe: + pr.ProbeSent = true default: r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) } } } r.send(m) + return true } -// sendHeartbeat sends an empty MsgApp +// sendHeartbeat sends a heartbeat RPC to the given peer. func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // Attach the commit as min(to.matched, r.committed). // When the leader sends out heartbeat message, @@ -491,7 +512,7 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // or it might not have all the committed entries. // The leader MUST NOT forward the follower's commit to // an unmatched index. - commit := min(r.getProgress(to).Match, r.raftLog.committed) + commit := min(r.prs.Progress[to].Match, r.raftLog.committed) m := pb.Message{ To: to, Type: pb.MsgHeartbeat, @@ -502,24 +523,13 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { r.send(m) } -func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) { - for id, pr := range r.prs { - f(id, pr) - } - - for id, pr := range r.learnerPrs { - f(id, pr) - } -} - // bcastAppend sends RPC, with entries to all peers that are not up-to-date // according to the progress recorded in r.prs. 
func (r *raft) bcastAppend() { - r.forEachProgress(func(id uint64, _ *Progress) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } - r.sendAppend(id) }) } @@ -535,7 +545,7 @@ func (r *raft) bcastHeartbeat() { } func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { - r.forEachProgress(func(id uint64, _ *Progress) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } @@ -543,17 +553,51 @@ func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { }) } +func (r *raft) advance(rd Ready) { + // If entries were applied (or a snapshot), update our cursor for + // the next Ready. Note that if the current HardState contains a + // new Commit index, this does not mean that we're also applying + // all of the new entries due to commit pagination by size. + if index := rd.appliedCursor(); index > 0 { + r.raftLog.appliedTo(index) + if r.prs.Config.AutoLeave && index >= r.pendingConfIndex && r.state == StateLeader { + // If the current (and most recent, at least for this leader's term) + // configuration should be auto-left, initiate that now. + ccdata, err := (&pb.ConfChangeV2{}).Marshal() + if err != nil { + panic(err) + } + ent := pb.Entry{ + Type: pb.EntryConfChangeV2, + Data: ccdata, + } + if !r.appendEntry(ent) { + // If we could not append the entry, bump the pending conf index + // so that we'll try again later. + // + // TODO(tbg): test this case. + r.pendingConfIndex = r.raftLog.lastIndex() + } else { + r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config) + } + } + } + r.reduceUncommittedSize(rd.CommittedEntries) + + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + r.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } +} + // maybeCommit attempts to advance the commit index. Returns true if // the commit index changed (in which case the caller should call // r.bcastAppend). func (r *raft) maybeCommit() bool { - // TODO(bmizerany): optimize.. Currently naive - mis := make(uint64Slice, 0, len(r.prs)) - for _, p := range r.prs { - mis = append(mis, p.Match) - } - sort.Sort(sort.Reverse(mis)) - mci := mis[r.quorum()-1] + mci := r.prs.Committed() return r.raftLog.maybeCommit(mci, r.Term) } @@ -570,28 +614,45 @@ func (r *raft) reset(term uint64) { r.abortLeaderTransfer() - r.votes = make(map[uint64]bool) - r.forEachProgress(func(id uint64, pr *Progress) { - *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner} + r.prs.ResetVotes() + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + *pr = tracker.Progress{ + Match: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.prs.MaxInflight), + IsLearner: pr.IsLearner, + } if id == r.id { pr.Match = r.raftLog.lastIndex() } }) - r.pendingConf = false + r.pendingConfIndex = 0 + r.uncommittedSize = 0 r.readOnly = newReadOnly(r.readOnly.option) } -func (r *raft) appendEntry(es ...pb.Entry) { +func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { li := r.raftLog.lastIndex() for i := range es { es[i].Term = r.Term es[i].Index = li + 1 + uint64(i) } - r.raftLog.append(es...) - r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex()) + // Track the size of this uncommitted proposal. + if !r.increaseUncommittedSize(es) { + r.logger.Debugf( + "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal", + r.id, + ) + // Drop the proposal. 
+ return false + } + // use latest "last" index after truncate/append + li = r.raftLog.append(es...) + r.prs.Progress[r.id].MaybeUpdate(li) // Regardless of maybeCommit's return, our caller will call bcastAppend. r.maybeCommit() + return true } // tickElection is run by followers and candidates after r.electionTimeout. @@ -661,7 +722,7 @@ func (r *raft) becomePreCandidate() { // but doesn't change anything else. In particular it does not increase // r.Term or change r.Vote. r.step = stepCandidate - r.votes = make(map[uint64]bool) + r.prs.ResetVotes() r.tick = r.tickElection r.lead = None r.state = StatePreCandidate @@ -678,24 +739,40 @@ func (r *raft) becomeLeader() { r.tick = r.tickHeartbeat r.lead = r.id r.state = StateLeader - ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) - } - - nconf := numOfPendingConf(ents) - if nconf > 1 { - panic("unexpected multiple uncommitted config entry") - } - if nconf == 1 { - r.pendingConf = true - } - - r.appendEntry(pb.Entry{Data: nil}) + // Followers enter replicate mode when they've been successfully probed + // (perhaps after having received a snapshot as a result). The leader is + // trivially in this state. Note that r.reset() has initialized this + // progress with the last index already. + r.prs.Progress[r.id].BecomeReplicate() + + // Conservatively set the pendingConfIndex to the last index in the + // log. There may or may not be a pending config change, but it's + // safe to delay any future proposals until we commit all our + // pending log entries, and scanning the entire tail of the log + // could be expensive. + r.pendingConfIndex = r.raftLog.lastIndex() + + emptyEnt := pb.Entry{Data: nil} + if !r.appendEntry(emptyEnt) { + // This won't happen because we just called reset() above. + r.logger.Panic("empty entry was dropped") + } + // As a special case, don't count the initial empty entry towards the + // uncommitted log quota. This is because we want to preserve the + // behavior of allowing one entry larger than quota if the current + // usage is zero. + r.reduceUncommittedSize([]pb.Entry{emptyEnt}) r.logger.Infof("%x became leader at term %d", r.id, r.Term) } +// campaign transitions the raft instance to candidate state. This must only be +// called after verifying that this is a legitimate transition. func (r *raft) campaign(t CampaignType) { + if !r.promotable() { + // This path should not be hit (callers are supposed to check), but + // better safe than sorry. + r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id) + } var term uint64 var voteMsg pb.MessageType if t == campaignPreElection { @@ -708,7 +785,7 @@ func (r *raft) campaign(t CampaignType) { voteMsg = pb.MsgVote term = r.Term } - if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) { + if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon { // We won the election after voting for ourselves (which must mean that // this is a single-node cluster). Advance to the next state. 
if t == campaignPreElection { @@ -718,7 +795,16 @@ func (r *raft) campaign(t CampaignType) { } return } - for id := range r.prs { + var ids []uint64 + { + idMap := r.prs.Voters.IDs() + ids = make([]uint64, 0, len(idMap)) + for id := range idMap { + ids = append(ids, id) + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + } + for _, id := range ids { if id == r.id { continue } @@ -733,21 +819,14 @@ func (r *raft) campaign(t CampaignType) { } } -func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) { +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) { if v { r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) } else { r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) } - if _, ok := r.votes[id]; !ok { - r.votes[id] = v - } - for _, vv := range r.votes { - if vv { - granted++ - } - } - return granted + r.prs.RecordVote(id, v) + return r.prs.TallyVotes() } func (r *raft) Step(m pb.Message) error { @@ -787,7 +866,7 @@ func (r *raft) Step(m pb.Message) error { } case m.Term < r.Term: - if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { // We have received messages from a leader at a lower term. It is possible // that these messages were simply delayed in the network, but this could // also mean that this node has advanced its term number during a network @@ -800,8 +879,23 @@ func (r *raft) Step(m pb.Message) error { // nodes that have been removed from the cluster's configuration: a // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, // but it will not receive MsgApp or MsgHeartbeat, so it will not create - // disruptive term increases + // disruptive term increases, by notifying leader of this node's activeness. + // The above comments also true for Pre-Vote + // + // When follower gets isolated, it soon starts an election ending + // up with a higher term than leader, although it won't receive enough + // votes to win the election. When it regains connectivity, this response + // with "pb.MsgAppResp" of higher term would force leader to step down. + // However, this disruption is inevitable to free this stuck node with + // fresh election. This can be prevented with Pre-Vote phase. r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else if m.Type == pb.MsgPreVote { + // Before Pre-Vote enable, there may have candidate with higher term, + // but less log. After update to Pre-Vote, the cluster may deadlock if + // we drop messages with a lower term. 
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true}) } else { // ignore other cases r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", @@ -813,6 +907,10 @@ func (r *raft) Step(m pb.Message) error { switch m.Type { case pb.MsgHup: if r.state != StateLeader { + if !r.promotable() { + r.logger.Warningf("%x is unpromotable and can not campaign; ignoring MsgHup", r.id) + return nil + } ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) if err != nil { r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) @@ -833,21 +931,38 @@ func (r *raft) Step(m pb.Message) error { } case pb.MsgVote, pb.MsgPreVote: - if r.isLearner { - // TODO: learner may need to vote, in case of node down when confchange. - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - return nil - } - // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should - // always equal r.Term. - if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // We can vote if this is a repeat of a vote we've already cast... + canVote := r.Vote == m.From || + // ...we haven't voted and we don't think there's a leader yet in this term... + (r.Vote == None && r.lead == None) || + // ...or this is a PreVote for a future term... + (m.Type == pb.MsgPreVote && m.Term > r.Term) + // ...and we believe the candidate is up to date. + if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // Note: it turns out that that learners must be allowed to cast votes. + // This seems counter- intuitive but is necessary in the situation in which + // a learner has been promoted (i.e. is now a voter) but has not learned + // about this yet. + // For example, consider a group in which id=1 is a learner and id=2 and + // id=3 are voters. A configuration change promoting 1 can be committed on + // the quorum `{2,3}` without the config change being appended to the + // learner's log. If the leader (say 2) fails, there are de facto two + // voters remaining. Only 3 can win an election (due to its log containing + // all committed entries), but to do so it will need 1 to vote. But 1 + // considers itself a learner and will continue to do so until 3 has + // stepped up as leader, replicates the conf change to 1, and 1 applies it. + // Ultimately, by receiving a request to vote, the learner realizes that + // the candidate believes it to be a voter, and that it should act + // accordingly. The candidate's config may be stale, too; but in that case + // it won't win the election, at least in the absence of the bug discussed + // in: + // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263. r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) // When responding to Msg{Pre,}Vote messages we include the term - // from the message, not the local term. To see why consider the + // from the message, not the local term. 
To see why, consider the // case where a single node was previously partitioned away and - // it's local term is now of date. If we include the local term + // it's local term is now out of date. If we include the local term // (recall that for pre-votes we don't update the local term), the // (pre-)campaigning node on the other end will proceed to ignore // the message (it ignores all out of date messages). @@ -866,57 +981,110 @@ func (r *raft) Step(m pb.Message) error { } default: - r.step(r, m) + err := r.step(r, m) + if err != nil { + return err + } } return nil } -type stepFunc func(r *raft, m pb.Message) +type stepFunc func(r *raft, m pb.Message) error -func stepLeader(r *raft, m pb.Message) { +func stepLeader(r *raft, m pb.Message) error { // These message types do not require any progress for m.From. switch m.Type { case pb.MsgBeat: r.bcastHeartbeat() - return + return nil case pb.MsgCheckQuorum: - if !r.checkQuorumActive() { + // The leader should always see itself as active. As a precaution, handle + // the case in which the leader isn't in the configuration any more (for + // example if it just removed itself). + // + // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the + // leader steps down when removing itself. I might be missing something. + if pr := r.prs.Progress[r.id]; pr != nil { + pr.RecentActive = true + } + if !r.prs.QuorumActive() { r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) r.becomeFollower(r.Term, None) } - return + // Mark everyone (but ourselves) as inactive in preparation for the next + // CheckQuorum. + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + if id != r.id { + pr.RecentActive = false + } + }) + return nil case pb.MsgProp: if len(m.Entries) == 0 { r.logger.Panicf("%x stepped empty MsgProp", r.id) } - if _, ok := r.prs[r.id]; !ok { + if r.prs.Progress[r.id] == nil { // If we are not currently a member of the range (i.e. this node // was removed from the configuration while serving as leader), // drop any new proposals. 
- return + return ErrProposalDropped } if r.leadTransferee != None { r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) - return + return ErrProposalDropped } - for i, e := range m.Entries { + for i := range m.Entries { + e := &m.Entries[i] + var cc pb.ConfChangeI if e.Type == pb.EntryConfChange { - if r.pendingConf { - r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String()) + var ccc pb.ConfChange + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } else if e.Type == pb.EntryConfChangeV2 { + var ccc pb.ConfChangeV2 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } + if cc != nil { + alreadyPending := r.pendingConfIndex > r.raftLog.applied + alreadyJoint := len(r.prs.Config.Voters[1]) > 0 + wantsLeaveJoint := len(cc.AsV2().Changes) == 0 + + var refused string + if alreadyPending { + refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) + } else if alreadyJoint && !wantsLeaveJoint { + refused = "must transition out of joint config first" + } else if !alreadyJoint && wantsLeaveJoint { + refused = "not in joint state; refusing empty conf change" + } + + if refused != "" { + r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused) m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } else { + r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 } - r.pendingConf = true } } - r.appendEntry(m.Entries...) + + if !r.appendEntry(m.Entries...) { + return ErrProposalDropped + } r.bcastAppend() - return + return nil case pb.MsgReadIndex: - if r.quorum() > 1 { + // If more than the local vote is needed, go through a full broadcast, + // otherwise optimize. + if !r.prs.IsSingleton() { if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term { // Reject read only request when this leader has not committed any log entry at its term. - return + return nil } // thinking: use an interally defined context instead of the user given context. @@ -925,62 +1093,85 @@ func stepLeader(r *raft, m pb.Message) { switch r.readOnly.option { case ReadOnlySafe: r.readOnly.addRequest(r.raftLog.committed, m) + // The local node automatically acks the request. + r.readOnly.recvAck(r.id, m.Entries[0].Data) r.bcastHeartbeatWithCtx(m.Entries[0].Data) case ReadOnlyLeaseBased: ri := r.raftLog.committed if m.From == None || m.From == r.id { // from local member - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + r.readStates = append(r.readStates, ReadState{Index: ri, RequestCtx: m.Entries[0].Data}) } else { r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) } } - } else { - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // only one voting member (the leader) in the cluster + if m.From == None || m.From == r.id { // from leader itself + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // from learner member + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries}) + } } - return + return nil } // All other message types require a progress for m.From (pr). 
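The MsgReadIndex branches above are what ultimately service Node.ReadIndex requests. A rough sketch of the application side, assuming the caller's Ready() loop forwards ReadStates on a channel; readStateC and waitApplied are assumed plumbing, not part of the raft package.

package readindex

import (
	"bytes"
	"context"

	"go.etcd.io/etcd/raft"
)

// linearizableRead issues a ReadIndex request and blocks until the matching
// ReadState arrives and the local state machine has caught up to its index.
func linearizableRead(ctx context.Context, n raft.Node, readStateC <-chan raft.ReadState, waitApplied func(uint64)) error {
	rctx := []byte("req-42") // opaque request context, echoed back in the ReadState
	if err := n.ReadIndex(ctx, rctx); err != nil {
		return err
	}
	for {
		select {
		case rs := <-readStateC:
			if !bytes.Equal(rs.RequestCtx, rctx) {
				continue // ReadState for some other in-flight request
			}
			// Safe to serve the read once the applied index reaches rs.Index.
			waitApplied(rs.Index)
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}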
- pr := r.getProgress(m.From) + pr := r.prs.Progress[m.From] if pr == nil { r.logger.Debugf("%x no progress available for %x", r.id, m.From) - return + return nil } switch m.Type { case pb.MsgAppResp: pr.RecentActive = true if m.Reject { - r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", + r.logger.Debugf("%x received MsgAppResp(MsgApp was rejected, lastindex: %d) from %x for index %d", r.id, m.RejectHint, m.From, m.Index) - if pr.maybeDecrTo(m.Index, m.RejectHint) { + if pr.MaybeDecrTo(m.Index, m.RejectHint) { r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) - if pr.State == ProgressStateReplicate { - pr.becomeProbe() + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() } r.sendAppend(m.From) } } else { oldPaused := pr.IsPaused() - if pr.maybeUpdate(m.Index) { + if pr.MaybeUpdate(m.Index) { switch { - case pr.State == ProgressStateProbe: - pr.becomeReplicate() - case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort(): - r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - pr.becomeProbe() - case pr.State == ProgressStateReplicate: - pr.ins.freeTo(m.Index) + case pr.State == tracker.StateProbe: + pr.BecomeReplicate() + case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot: + // TODO(tbg): we should also enter this branch if a snapshot is + // received that is below pr.PendingSnapshot but which makes it + // possible to use the log again. + r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + // Transition back to replicating state via probing state + // (which takes the snapshot into account). If we didn't + // move to replicating state, that would only happen with + // the next round of appends (but there may not be a next + // round for a while, exposing an inconsistent RaftStatus). + pr.BecomeProbe() + pr.BecomeReplicate() + case pr.State == tracker.StateReplicate: + pr.Inflights.FreeLE(m.Index) } if r.maybeCommit() { r.bcastAppend() } else if oldPaused { - // update() reset the wait state on this node. If we had delayed sending - // an update before, send it now. + // If we were paused before, this node may be missing the + // latest commit index, so send it. r.sendAppend(m.From) } + // We've updated flow control information above, which may + // allow us to send multiple (size-limited) in-flight messages + // at once (such as when transitioning from probe to + // replicate, or when freeTo() covers multiple messages). If + // we have more entries to send, send as many messages as we + // can (without sending empty messages for the commit index) + for r.maybeSendAppend(m.From, false) { + } // Transfer leadership is in progress. if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) @@ -990,23 +1181,22 @@ func stepLeader(r *raft, m pb.Message) { } case pb.MsgHeartbeatResp: pr.RecentActive = true - pr.resume() + pr.ProbeSent = false // free one slot for the full inflights window to allow progress. 
- if pr.State == ProgressStateReplicate && pr.ins.full() { - pr.ins.freeFirstOne() + if pr.State == tracker.StateReplicate && pr.Inflights.Full() { + pr.Inflights.FreeFirstOne() } if pr.Match < r.raftLog.lastIndex() { r.sendAppend(m.From) } if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { - return + return nil } - ackCount := r.readOnly.recvAck(m) - if ackCount < r.quorum() { - return + if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { + return nil } rss := r.readOnly.advance(m) @@ -1019,32 +1209,38 @@ func stepLeader(r *raft, m pb.Message) { } } case pb.MsgSnapStatus: - if pr.State != ProgressStateSnapshot { - return + if pr.State != tracker.StateSnapshot { + return nil } + // TODO(tbg): this code is very similar to the snapshot handling in + // MsgAppResp above. In fact, the code there is more correct than the + // code here and should likely be updated to match (or even better, the + // logic pulled into a newly created Progress state machine handler). if !m.Reject { - pr.becomeProbe() + pr.BecomeProbe() r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) } else { - pr.snapshotFailure() - pr.becomeProbe() + // NB: the order here matters or we'll be probing erroneously from + // the snapshot index, but the snapshot never applied. + pr.PendingSnapshot = 0 + pr.BecomeProbe() r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) } - // If snapshot finish, wait for the msgAppResp from the remote node before sending - // out the next msgApp. + // If snapshot finish, wait for the MsgAppResp from the remote node before sending + // out the next MsgApp. // If snapshot failure, wait for a heartbeat interval before next try - pr.pause() + pr.ProbeSent = true case pb.MsgUnreachable: // During optimistic replication, if the remote becomes unreachable, // there is huge probability that a MsgApp is lost. - if pr.State == ProgressStateReplicate { - pr.becomeProbe() + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() } r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) case pb.MsgTransferLeader: if pr.IsLearner { r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id) - return + return nil } leadTransferee := m.From lastLeadTransferee := r.leadTransferee @@ -1052,14 +1248,14 @@ func stepLeader(r *raft, m pb.Message) { if lastLeadTransferee == leadTransferee { r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", r.id, r.Term, leadTransferee, leadTransferee) - return + return nil } r.abortLeaderTransfer() r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) } if leadTransferee == r.id { r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) - return + return nil } // Transfer leadership to third party. r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) @@ -1073,11 +1269,12 @@ func stepLeader(r *raft, m pb.Message) { r.sendAppend(leadTransferee) } } + return nil } // stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is // whether they respond to MsgVoteResp or MsgPreVoteResp. 
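The candidate transitions above are driven entirely by the quorum tallies. A standalone sketch of MajorityConfig.VoteResult for a five-voter configuration, using the quorum package introduced in this patch:

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/quorum"
)

func main() {
	cfg := quorum.MajorityConfig{1: {}, 2: {}, 3: {}, 4: {}, 5: {}}

	// Two yes votes out of five: still pending, a third yes could win.
	fmt.Println(cfg.VoteResult(map[uint64]bool{1: true, 2: true})) // VotePending

	// Three yes votes reach quorum.
	fmt.Println(cfg.VoteResult(map[uint64]bool{1: true, 2: true, 3: true})) // VoteWon

	// Three no votes make a yes quorum impossible.
	fmt.Println(cfg.VoteResult(map[uint64]bool{1: false, 2: false, 3: false})) // VoteLost
}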
-func stepCandidate(r *raft, m pb.Message) { +func stepCandidate(r *raft, m pb.Message) error { // Only handle vote responses corresponding to our candidacy (while in // StateCandidate, we may get stale MsgPreVoteResp messages in this term from // our pre-candidate state). @@ -1090,44 +1287,47 @@ func stepCandidate(r *raft, m pb.Message) { switch m.Type { case pb.MsgProp: r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return + return ErrProposalDropped case pb.MsgApp: - r.becomeFollower(r.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleAppendEntries(m) case pb.MsgHeartbeat: - r.becomeFollower(r.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleHeartbeat(m) case pb.MsgSnap: - r.becomeFollower(m.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleSnapshot(m) case myVoteRespType: - gr := r.poll(m.From, m.Type, !m.Reject) - r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr) - switch r.quorum() { - case gr: + gr, rj, res := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj) + switch res { + case quorum.VoteWon: if r.state == StatePreCandidate { r.campaign(campaignElection) } else { r.becomeLeader() r.bcastAppend() } - case len(r.votes) - gr: + case quorum.VoteLost: + // pb.MsgPreVoteResp contains future term of pre-candidate + // m.Term > r.Term; reuse r.Term r.becomeFollower(r.Term, None) } case pb.MsgTimeoutNow: r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) } + return nil } -func stepFollower(r *raft, m pb.Message) { +func stepFollower(r *raft, m pb.Message) error { switch m.Type { case pb.MsgProp: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return + return ErrProposalDropped } else if r.disableProposalForwarding { r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term) - return + return ErrProposalDropped } m.To = r.lead r.send(m) @@ -1146,7 +1346,7 @@ func stepFollower(r *raft, m pb.Message) { case pb.MsgTransferLeader: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) - return + return nil } m.To = r.lead r.send(m) @@ -1163,17 +1363,18 @@ func stepFollower(r *raft, m pb.Message) { case pb.MsgReadIndex: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) - return + return nil } m.To = r.lead r.send(m) case pb.MsgReadIndexResp: if len(m.Entries) != 1 { r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) - return + return nil } r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) } + return nil } func (r *raft) handleAppendEntries(m pb.Message) { @@ -1185,7 +1386,7 @@ func (r *raft) handleAppendEntries(m pb.Message) { if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", + r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, 
m.LogTerm, m.Index, m.From) r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) } @@ -1210,139 +1411,167 @@ func (r *raft) handleSnapshot(m pb.Message) { } // restore recovers the state machine from a snapshot. It restores the log and the -// configuration of state machine. +// configuration of state machine. If this method returns false, the snapshot was +// ignored, either because it was obsolete or because of an error. func (r *raft) restore(s pb.Snapshot) bool { if s.Metadata.Index <= r.raftLog.committed { return false } - if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - r.raftLog.commitTo(s.Metadata.Index) + if r.state != StateFollower { + // This is defense-in-depth: if the leader somehow ended up applying a + // snapshot, it could move into a new term without moving into a + // follower state. This should never fire, but if it did, we'd have + // prevented damage by returning early, so log only a loud warning. + // + // At the time of writing, the instance is guaranteed to be in follower + // state when this method is called. + r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id) + r.becomeFollower(r.Term+1, None) return false } - // The normal peer can't become learner. - if !r.isLearner { - for _, id := range s.Metadata.ConfState.Learners { + // More defense-in-depth: throw away snapshot if recipient is not in the + // config. This shouuldn't ever happen (at the time of writing) but lots of + // code here and there assumes that r.id is in the progress tracker. + found := false + cs := s.Metadata.ConfState + for _, set := range [][]uint64{ + cs.Voters, + cs.Learners, + } { + for _, id := range set { if id == r.id { - r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term) - return false + found = true + break } } } + if !found { + r.logger.Warningf( + "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen", + r.id, cs, + ) + return false + } - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + // Now go ahead and actually restore. 
+ + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } r.raftLog.restore(s) - r.prs = make(map[uint64]*Progress) - r.learnerPrs = make(map[uint64]*Progress) - r.restoreNode(s.Metadata.ConfState.Nodes, false) - r.restoreNode(s.Metadata.ConfState.Learners, true) - return true -} -func (r *raft) restoreNode(nodes []uint64, isLearner bool) { - for _, n := range nodes { - match, next := uint64(0), r.raftLog.lastIndex()+1 - if n == r.id { - match = next - 1 - r.isLearner = isLearner - } - r.setProgress(n, match, next, isLearner) - r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n)) + // Reset the configuration and add the (potentially updated) peers in anew. + r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight) + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + }, cs) + + if err != nil { + // This should never happen. Either there's a bug in our config change + // handling or the client corrupted the conf change. + panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err)) } + + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + pr := r.prs.Progress[r.id] + pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + return true } // promotable indicates whether state machine can be promoted to leader, // which is true when its own id is in progress list. func (r *raft) promotable() bool { - _, ok := r.prs[r.id] - return ok + pr := r.prs.Progress[r.id] + return pr != nil && !pr.IsLearner } -func (r *raft) addNode(id uint64) { - r.addNodeOrLearnerNode(id, false) -} - -func (r *raft) addLearner(id uint64) { - r.addNodeOrLearnerNode(id, true) -} - -func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) { - r.pendingConf = false - pr := r.getProgress(id) - if pr == nil { - r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner) - } else { - if isLearner && !pr.IsLearner { - // can only change Learner to Voter - r.logger.Infof("%x ignored addLeaner: do not support changing %x from raft peer to learner.", r.id, id) - return +func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { + cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) { + changer := confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), } - - if isLearner == pr.IsLearner { - // Ignore any redundant addNode calls (which can happen because the - // initial bootstrapping entries are applied twice). - return + if cc.LeaveJoint() { + return changer.LeaveJoint() + } else if autoLeave, ok := cc.EnterJoint(); ok { + return changer.EnterJoint(autoLeave, cc.Changes...) } + return changer.Simple(cc.Changes...) + }() - // change Learner to Voter, use origin Learner progress - delete(r.learnerPrs, id) - pr.IsLearner = false - r.prs[id] = pr - } - - if r.id == id { - r.isLearner = isLearner + if err != nil { + // TODO(tbg): return the error to the caller. + panic(err) } - // When a node is first added, we should mark it as recently active. 
- // Otherwise, CheckQuorum may cause us to step down if it is invoked - // before the added node has a chance to communicate with us. - pr = r.getProgress(id) - pr.RecentActive = true + return r.switchToConfig(cfg, prs) } -func (r *raft) removeNode(id uint64) { - r.delProgress(id) - r.pendingConf = false - - // do not try to commit or abort transferring if there is no nodes in the cluster. - if len(r.prs) == 0 && len(r.learnerPrs) == 0 { - return +// switchToConfig reconfigures this node to use the provided configuration. It +// updates the in-memory state and, when necessary, carries out additional +// actions such as reacting to the removal of nodes or changed quorum +// requirements. +// +// The inputs usually result from restoring a ConfState or applying a ConfChange. +func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { + r.prs.Config = cfg + r.prs.Progress = prs + + r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config) + cs := r.prs.ConfState() + pr, ok := r.prs.Progress[r.id] + + // Update whether the node itself is a learner, resetting to false when the + // node is removed. + r.isLearner = ok && pr.IsLearner + + if (!ok || r.isLearner) && r.state == StateLeader { + // This node is leader and was removed or demoted. We prevent demotions + // at the time writing but hypothetically we handle them the same way as + // removing the leader: stepping down into the next Term. + // + // TODO(tbg): step down (for sanity) and ask follower with largest Match + // to TimeoutNow (to avoid interruption). This might still drop some + // proposals but it's better than nothing. + // + // TODO(tbg): test this branch. It is untested at the time of writing. + return cs + } + + // The remaining steps only make sense if this node is the leader and there + // are other nodes. + if r.state != StateLeader || len(cs.Voters) == 0 { + return cs } - // The quorum size is now smaller, so see if any pending entries can - // be committed. if r.maybeCommit() { + // If the configuration change means that more entries are committed now, + // broadcast/append to everyone in the updated config. r.bcastAppend() - } - // If the removed node is the leadTransferee, then abort the leadership transferring. - if r.state == StateLeader && r.leadTransferee == id { + } else { + // Otherwise, still probe the newly added replicas; there's no reason to + // let them wait out a heartbeat interval (or the next incoming + // proposal). + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.maybeSendAppend(id, false /* sendIfEmpty */) + }) + } + // If the the leadTransferee was removed, abort the leadership transfer. 
+ if _, tOK := r.prs.Progress[r.leadTransferee]; !tOK && r.leadTransferee != 0 { r.abortLeaderTransfer() } -} - -func (r *raft) resetPendingConf() { r.pendingConf = false } - -func (r *raft) setProgress(id, match, next uint64, isLearner bool) { - if !isLearner { - delete(r.learnerPrs, id) - r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} - return - } - - if _, ok := r.prs[id]; ok { - panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id)) - } - r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true} -} -func (r *raft) delProgress(id uint64) { - delete(r.prs, id) - delete(r.learnerPrs, id) + return cs } func (r *raft) loadState(state pb.HardState) { @@ -1365,29 +1594,6 @@ func (r *raft) resetRandomizedElectionTimeout() { r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) } -// checkQuorumActive returns true if the quorum is active from -// the view of the local raft state machine. Otherwise, it returns -// false. -// checkQuorumActive also resets all RecentActive to false. -func (r *raft) checkQuorumActive() bool { - var act int - - r.forEachProgress(func(id uint64, pr *Progress) { - if id == r.id { // self is always active - act++ - return - } - - if pr.RecentActive && !pr.IsLearner { - act++ - } - - pr.RecentActive = false - }) - - return act >= r.quorum() -} - func (r *raft) sendTimeoutNow(to uint64) { r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) } @@ -1396,6 +1602,49 @@ func (r *raft) abortLeaderTransfer() { r.leadTransferee = None } +// increaseUncommittedSize computes the size of the proposed entries and +// determines whether they would push leader over its maxUncommittedSize limit. +// If the new entries would exceed the limit, the method returns false. If not, +// the increase in uncommitted entry size is recorded and the method returns +// true. +func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + + if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + // If the uncommitted tail of the Raft log is empty, allow any size + // proposal. Otherwise, limit the size of the uncommitted tail of the + // log and drop any proposal that would push the size over the limit. + return false + } + r.uncommittedSize += s + return true +} + +// reduceUncommittedSize accounts for the newly committed entries by decreasing +// the uncommitted entry size limit. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + if r.uncommittedSize == 0 { + // Fast-path for followers, who do not track or enforce the limit. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + if s > r.uncommittedSize { + // uncommittedSize may underestimate the size of the uncommitted Raft + // log tail but will never overestimate it. Saturate at 0 instead of + // allowing overflow. 
+ r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + func numOfPendingConf(ents []pb.Entry) int { n := 0 for i := range ents { diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go new file mode 100644 index 00000000000..46a7a70212e --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go @@ -0,0 +1,170 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow +// treating them in a unified manner. +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChange, bool) +} + +// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2 +// and returns the result along with the corresponding EntryType. +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +// AsV2 returns a V2 configuration change carrying out the same operation. +func (c ConfChange) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +// AsV1 returns the ConfChange and true. +func (c ConfChange) AsV1() (ConfChange, bool) { + return c, true +} + +// AsV2 is the identity. +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +// AsV1 returns ConfChange{} and false. +func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false } + +// EnterJoint returns two bools. The second bool is true if and only if this +// config change will use Joint Consensus, which is the case if it contains more +// than one change or if the use of Joint Consensus was requested explicitly. +// The first bool can only be true if second one is, and indicates whether the +// Joint State will be left automatically. +func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // NB: in theory, more config changes could qualify for the "simple" + // protocol but it depends on the config on top of which the changes apply. + // For example, adding two learners is not OK if both nodes are part of the + // base config (i.e. two voters are turned into learners in the process of + // applying the conf change). In practice, these distinctions should not + // matter, so we keep it simple and use Joint Consensus liberally. + if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // Use Joint Consensus. 
+ var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("unknown transition: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +func (c *ConfChangeV2) LeaveJoint() bool { + cpy := *c + cpy.Context = nil + return proto.Equal(&cpy, &ConfChangeV2{}) +} + +// ConfChangesFromString parses a Space-delimited sequence of operations into a +// slice of ConfChangeSingle. The supported operations are: +// - vn: make n a voter, +// - ln: make n a learner, +// - rn: remove n, and +// - un: update n. +func ConfChangesFromString(s string) ([]ConfChangeSingle, error) { + var ccs []ConfChangeSingle + toks := strings.Split(strings.TrimSpace(s), " ") + if toks[0] == "" { + toks = nil + } + for _, tok := range toks { + if len(tok) < 2 { + return nil, fmt.Errorf("unknown token %s", tok) + } + var cc ConfChangeSingle + switch tok[0] { + case 'v': + cc.Type = ConfChangeAddNode + case 'l': + cc.Type = ConfChangeAddLearnerNode + case 'r': + cc.Type = ConfChangeRemoveNode + case 'u': + cc.Type = ConfChangeUpdateNode + default: + return nil, fmt.Errorf("unknown input: %s", tok) + } + id, err := strconv.ParseUint(tok[1:], 10, 64) + if err != nil { + return nil, err + } + cc.NodeID = id + ccs = append(ccs, cc) + } + return ccs, nil +} + +// ConfChangesToString is the inverse to ConfChangesFromString. +func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteByte('v') + case ConfChangeAddLearnerNode: + buf.WriteByte('l') + case ConfChangeRemoveNode: + buf.WriteByte('r') + case ConfChangeUpdateNode: + buf.WriteByte('u') + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go new file mode 100644 index 00000000000..4bda93214b2 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go @@ -0,0 +1,45 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent returns a nil error if the inputs describe the same configuration. +// On mismatch, returns a descriptive error showing the differences. +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + cs.XXX_unrecognized = nil + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go similarity index 52% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go index 753bd84ac62..fcf259c89be 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go @@ -1,16 +1,35 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: raft.proto +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange + ConfChangeSingle + ConfChangeV2 +*/ package raftpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -27,18 +46,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type EntryType int32 const ( - EntryNormal EntryType = 0 - EntryConfChange EntryType = 1 + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 ) var EntryType_name = map[int32]string{ 0: "EntryNormal", 1: "EntryConfChange", + 2: "EntryConfChangeV2", } - var EntryType_value = map[string]int32{ - "EntryNormal": 0, - "EntryConfChange": 1, + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, } func (x EntryType) Enum() *EntryType { @@ -46,11 +67,9 @@ func (x EntryType) Enum() *EntryType { *p = x return p } - func (x EntryType) String() string { return proto.EnumName(EntryType_name, int32(x)) } - func (x *EntryType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") if err != nil { @@ -59,10 +78,7 @@ func (x *EntryType) UnmarshalJSON(data []byte) error { *x = EntryType(value) return nil } - -func (EntryType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{0} -} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } type MessageType int32 @@ -109,7 +125,6 @@ var MessageType_name = map[int32]string{ 17: "MsgPreVote", 18: "MsgPreVoteResp", } - var MessageType_value = map[string]int32{ "MsgHup": 0, "MsgBeat": 1, @@ -137,11 +152,9 @@ func (x MessageType) Enum() *MessageType { *p = x return p } - func (x MessageType) String() string { return proto.EnumName(MessageType_name, int32(x)) } - func (x *MessageType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") if err != nil { @@ -150,10 +163,58 @@ func (x *MessageType) UnmarshalJSON(data []byte) error { *x = MessageType(value) return nil } +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +// 
ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +type ConfChangeTransition int32 + +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} -func (MessageType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{1} +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p } +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } type ConfChangeType int32 @@ -170,7 +231,6 @@ var ConfChangeType_name = map[int32]string{ 2: "ConfChangeUpdateNode", 3: "ConfChangeAddLearnerNode", } - var ConfChangeType_value = map[string]int32{ "ConfChangeAddNode": 0, "ConfChangeRemoveNode": 1, @@ -183,11 +243,9 @@ func (x ConfChangeType) Enum() *ConfChangeType { *p = x return p } - func (x ConfChangeType) String() string { return proto.EnumName(ConfChangeType_name, int32(x)) } - func (x *ConfChangeType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") if err != nil { @@ -196,318 +254,174 @@ func (x *ConfChangeType) UnmarshalJSON(data []byte) error { *x = ConfChangeType(value) return nil } - -func (ConfChangeType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{2} -} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } type Entry struct { - Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` - Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` - Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` - Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) 
String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{0} -} -func (m *Entry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Entry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Entry.Merge(m, src) -} -func (m *Entry) XXX_Size() int { - return m.Size() -} -func (m *Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Entry.DiscardUnknown(m) + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Entry proto.InternalMessageInfo +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } type SnapshotMetadata struct { - ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` - Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} -func (*SnapshotMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{1} -} -func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotMetadata.Merge(m, src) -} -func (m *SnapshotMetadata) XXX_Size() int { - return m.Size() -} -func (m *SnapshotMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m) + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` - XXX_NoUnkeyedLiteral 
struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{2} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } type Message struct { - Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` - To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` - From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` - Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{3} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} -func (m *Message) XXX_Size() int { - return m.Size() -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 
`protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` - Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` - Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} -func (*HardState) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{4} -} -func (m *HardState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HardState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HardState) XXX_Merge(src proto.Message) { - xxx_messageInfo_HardState.Merge(m, src) -} -func (m *HardState) XXX_Size() int { - return m.Size() -} -func (m *HardState) XXX_DiscardUnknown() { - xxx_messageInfo_HardState.DiscardUnknown(m) + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_HardState proto.InternalMessageInfo +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` - Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func (*ConfState) ProtoMessage() {} -func (*ConfState) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{5} -} -func (m *ConfState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfState.Marshal(b, m, deterministic) - } 
else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfState) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfState.Merge(m, src) -} -func (m *ConfState) XXX_Size() int { - return m.Size() -} -func (m *ConfState) XXX_DiscardUnknown() { - xxx_messageInfo_ConfState.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfState proto.InternalMessageInfo + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} -func (*ConfChange) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{6} -} -func (m *ConfChange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfChange.Merge(m, src) -} -func (m *ConfChange) XXX_Size() int { - return m.Size() -} -func (m *ConfChange) XXX_DiscardUnknown() { - xxx_messageInfo_ConfChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfChange proto.InternalMessageInfo + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` + Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` + // NB: this is used only by etcd to thread through a unique identifier. 
+ // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. 
+// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } func init() { - proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) - proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) - proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) proto.RegisterType((*Entry)(nil), "raftpb.Entry") proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") @@ -515,69 +429,17 @@ func init() { proto.RegisterType((*HardState)(nil), "raftpb.HardState") proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) } - -func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } - -var fileDescriptor_b042552c306ae59b = []byte{ - // 816 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45, - 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38, - 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b, - 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20, - 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3, - 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9, - 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f, - 0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a, - 0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68, - 0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2, - 0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77, - 0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, - 0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, - 0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d, - 0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7, - 
0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf, - 0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1, - 0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21, - 0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac, - 0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41, - 0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a, - 0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda, - 0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72, - 0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb, - 0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f, - 0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6, - 0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac, - 0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5, - 0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72, - 0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73, - 0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4, - 0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa, - 0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b, - 0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b, - 0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36, - 0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19, - 0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6, - 0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2, - 0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8, - 0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5, - 0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7, - 0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb, - 0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24, - 0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf, - 0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3, - 0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50, - 0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78, - 0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01, - 0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, - 0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, - 0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00, -} - func (m *Entry) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -585,42 +447,35 @@ func (m *Entry) Marshal() (dAtA []byte, err error) { } func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -628,42 +483,34 @@ func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { } func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- + i += n1 dAtA[i] = 0x10 - { - size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Snapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -671,43 +518,34 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) { } func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintRaft(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -715,89 +553,78 @@ func (m *Message) Marshal() (dAtA []byte, err error) { } func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if m.Context != nil { - i -= len(m.Context) - copy(dAtA[i:], m.Context) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i-- - dAtA[i] = 0x62 + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) - i-- - dAtA[i] = 0x58 - i-- + i += n3 + dAtA[i] = 0x50 + i++ if m.Reject { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x50 - { - size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) } - i-- - dAtA[i] = 0x4a - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - i-- - dAtA[i] = 0x40 - if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x30 - i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) - i-- - dAtA[i] = 0x28 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x20 - i = encodeVarintRaft(dAtA, i, 
uint64(m.From)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.To)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *HardState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -805,35 +632,29 @@ func (m *HardState) Marshal() (dAtA []byte, err error) { } func (m *HardState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *ConfState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -841,40 +662,56 @@ func (m *ConfState) Marshal() (dAtA []byte, err error) { } func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Voters) > 0 { + for _, num := range m.Voters { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } } if len(m.Learners) > 0 { - for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx])) - i-- + for _, num := range m.Learners { dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) } } - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx])) - i-- - dAtA[i] = 0x8 + if len(m.VotersOutgoing) > 0 { + for _, num := range m.VotersOutgoing { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.LearnersNext) > 0 { + for _, num := range m.LearnersNext { + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) } } - return len(dAtA) - i, nil + dAtA[i] = 0x28 + i++ + if m.AutoLeave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ConfChange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -882,53 +719,110 @@ func (m *ConfChange) Marshal() (dAtA []byte, err error) { } func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) if m.Context != nil { - i -= len(m.Context) - copy(dAtA[i:], m.Context) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) } - i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) - i-- - dAtA[i] = 0x18 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.ID)) - i-- + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Context != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { - offset -= sovRaft(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Entry) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Type)) @@ -945,9 +839,6 @@ func (m *Entry) Size() (n int) { } func (m *SnapshotMetadata) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ConfState.Size() @@ -961,9 +852,6 @@ func (m *SnapshotMetadata) Size() (n int) { } func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Data != nil { @@ -979,9 +867,6 @@ func (m *Snapshot) Size() (n int) { } func (m *Message) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Type)) @@ -1012,9 +897,6 @@ func (m *Message) Size() (n int) { } func (m *HardState) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Term)) @@ -1027,13 +909,10 @@ func (m *HardState) Size() (n int) { } func (m *ConfState) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if len(m.Nodes) > 0 { - for _, e := range m.Nodes { + if len(m.Voters) > 0 { + for _, 
e := range m.Voters { n += 1 + sovRaft(uint64(e)) } } @@ -1042,6 +921,17 @@ func (m *ConfState) Size() (n int) { n += 1 + sovRaft(uint64(e)) } } + if len(m.VotersOutgoing) > 0 { + for _, e := range m.VotersOutgoing { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.LearnersNext) > 0 { + for _, e := range m.LearnersNext { + n += 1 + sovRaft(uint64(e)) + } + } + n += 2 if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1049,9 +939,6 @@ func (m *ConfState) Size() (n int) { } func (m *ConfChange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.ID)) @@ -1067,8 +954,46 @@ func (m *ConfChange) Size() (n int) { return n } +func (m *ConfChangeSingle) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeV2) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovRaft(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozRaft(x uint64) (n int) { return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1088,7 +1013,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1116,7 +1041,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= EntryType(b&0x7F) << shift + m.Type |= (EntryType(b) & 0x7F) << shift if b < 0x80 { break } @@ -1135,7 +1060,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1154,7 +1079,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1173,7 +1098,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1182,9 +1107,6 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1202,9 +1124,6 @@ func (m *Entry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1233,7 +1152,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1261,7 +1180,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1270,9 +1189,6 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1294,7 +1210,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1313,7 +1229,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1327,9 +1243,6 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1358,7 +1271,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1386,7 +1299,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1395,9 +1308,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1420,7 +1330,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1429,9 +1339,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,9 +1355,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1479,7 +1383,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1507,7 +1411,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= MessageType(b&0x7F) << shift + m.Type |= (MessageType(b) & 0x7F) << shift if b < 0x80 { break } @@ -1526,7 +1430,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.To |= uint64(b&0x7F) << shift + m.To |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1545,7 +1449,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.From |= uint64(b&0x7F) << shift + m.From |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1564,7 +1468,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1583,7 +1487,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LogTerm |= uint64(b&0x7F) << shift + m.LogTerm |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1602,7 +1506,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1621,7 +1525,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-1630,9 +1534,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1655,7 +1556,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Commit |= uint64(b&0x7F) << shift + m.Commit |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1674,7 +1575,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1683,9 +1584,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1707,7 +1605,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1727,7 +1625,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RejectHint |= uint64(b&0x7F) << shift + m.RejectHint |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1746,7 +1644,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1755,9 +1653,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1765,7 +1660,434 @@ func (m *Message) Unmarshal(dAtA []byte) error { if m.Context == nil { m.Context = []byte{} } - iNdEx = postIndex + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLeave = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1775,9 +2097,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2110,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } return nil } -func (m *HardState) Unmarshal(dAtA []byte) error { +func (m *ConfChange) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1806,7 +2125,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1814,17 +2133,17 @@ func (m *HardState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HardState: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - m.Term = 0 + m.ID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1834,16 +2153,16 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Vote = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1853,16 +2172,35 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Vote |= uint64(b&0x7F) << shift + m.Type |= (ConfChangeType(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) } - m.Commit = 0 + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1872,11 +2210,23 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Commit |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1886,9 +2236,6 @@ func (m *HardState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1902,7 +2249,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfState) Unmarshal(dAtA []byte) error { +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1917,7 +2264,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1925,163 +2272,49 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRaft + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Nodes) == 0 { - m.Nodes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } case 2: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Learners = append(m.Learners, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRaft + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Learners) == 0 { - m.Learners = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Learners = append(m.Learners, v) + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) } default: iNdEx = preIndex @@ -2092,9 +2325,6 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2108,7 +2338,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfChange) Unmarshal(dAtA []byte) error { +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2123,7 +2353,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2131,17 +2361,17 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType) } - m.ID = 0 + m.Transition = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -2151,16 +2381,16 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.Transition |= (ConfChangeTransition(b) & 0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) } - m.Type = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -2170,31 +2400,24 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= ConfChangeType(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 
3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + if msglen < 0 { + return ErrInvalidLengthRaft } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - case 4: + m.Changes = append(m.Changes, ConfChangeSingle{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } @@ -2208,7 +2431,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2217,9 +2440,6 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2237,9 +2457,6 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2307,11 +2524,8 @@ func skipRaft(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRaft - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRaft } return iNdEx, nil @@ -2342,9 +2556,6 @@ func skipRaft(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRaft - } } return iNdEx, nil case 4: @@ -2363,3 +2574,73 @@ var ( ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xe3, 0x36, + 0x17, 0xb5, 0x64, 0xc5, 0x3f, 0xd7, 0x8e, 0xc3, 0xdc, 0xc9, 0x37, 0x20, 0x82, 0xc0, 0xe3, 0xcf, + 0xd3, 0x62, 0x8c, 0x14, 0x93, 0x16, 0x5e, 0x14, 0x45, 0x77, 0xf9, 0x19, 0x20, 0x29, 0xe2, 0x74, + 0xea, 0x64, 0xb2, 0x28, 0x50, 0x04, 0x8c, 0x45, 0x2b, 0x6a, 0x2d, 0x51, 0xa0, 0xe8, 0x34, 0xd9, + 0x14, 0x45, 0x9f, 0xa2, 0x9b, 0xd9, 0xf6, 0x01, 0xfa, 0x14, 0x59, 0x0e, 0xd0, 0xfd, 0xa0, 0x93, + 0xbe, 0x48, 0x41, 0x8a, 0xb2, 0x65, 0x27, 0x98, 0x45, 0x77, 0xe4, 0x39, 0x87, 0xf7, 0x9e, 0x7b, + 0x79, 0x45, 0x01, 0x48, 0x36, 0x56, 0x3b, 0x89, 0x14, 0x4a, 0x60, 0x45, 0xaf, 0x93, 0xcb, 0xcd, + 0x8d, 0x40, 0x04, 0xc2, 0x40, 0x9f, 0xeb, 0x55, 0xc6, 0x76, 0x7f, 0x81, 0x95, 0x57, 0xb1, 0x92, + 0xb7, 0xf8, 0x19, 0x78, 0x67, 0xb7, 0x09, 0xa7, 0x4e, 0xc7, 0xe9, 0xb5, 0xfa, 0xeb, 0x3b, 0xd9, + 0xa9, 0x1d, 0x43, 0x6a, 0x62, 0xcf, 0xbb, 0x7b, 0xff, 0xac, 0x34, 0x34, 0x22, 0xa4, 0xe0, 0x9d, + 0x71, 0x19, 0x51, 0xb7, 0xe3, 0xf4, 0xbc, 0x19, 0xc3, 0x65, 0x84, 0x9b, 0xb0, 0x72, 0x14, 0xfb, + 0xfc, 0x86, 0x96, 0x0b, 0x54, 0x06, 0x21, 0x82, 0x77, 0xc0, 0x14, 0xa3, 0x5e, 0xc7, 0xe9, 0x35, + 0x87, 0x66, 0xdd, 0xfd, 0xd5, 0x01, 0x72, 0x1a, 0xb3, 0x24, 0xbd, 0x12, 0x6a, 0xc0, 0x15, 0xf3, + 0x99, 0x62, 0xf8, 0x25, 0xc0, 0x48, 0xc4, 0xe3, 0x8b, 0x54, 0x31, 0x95, 
0x39, 0x6a, 0xcc, 0x1d, + 0xed, 0x8b, 0x78, 0x7c, 0xaa, 0x09, 0x1b, 0xbc, 0x3e, 0xca, 0x01, 0x9d, 0x3c, 0x34, 0xc9, 0x8b, + 0xbe, 0x32, 0x48, 0x5b, 0x56, 0xda, 0x72, 0xd1, 0x97, 0x41, 0xba, 0xdf, 0x43, 0x2d, 0x77, 0xa0, + 0x2d, 0x6a, 0x07, 0x26, 0x67, 0x73, 0x68, 0xd6, 0xf8, 0x35, 0xd4, 0x22, 0xeb, 0xcc, 0x04, 0x6e, + 0xf4, 0x69, 0xee, 0x65, 0xd9, 0xb9, 0x8d, 0x3b, 0xd3, 0x77, 0xdf, 0x96, 0xa1, 0x3a, 0xe0, 0x69, + 0xca, 0x02, 0x8e, 0x2f, 0xc1, 0x53, 0xf3, 0x0e, 0x3f, 0xc9, 0x63, 0x58, 0xba, 0xd8, 0x63, 0x2d, + 0xc3, 0x0d, 0x70, 0x95, 0x58, 0xa8, 0xc4, 0x55, 0x42, 0x97, 0x31, 0x96, 0x62, 0xa9, 0x0c, 0x8d, + 0xcc, 0x0a, 0xf4, 0x96, 0x0b, 0xc4, 0x36, 0x54, 0x27, 0x22, 0x30, 0x17, 0xb6, 0x52, 0x20, 0x73, + 0x70, 0xde, 0xb6, 0xca, 0xc3, 0xb6, 0xbd, 0x84, 0x2a, 0x8f, 0x95, 0x0c, 0x79, 0x4a, 0xab, 0x9d, + 0x72, 0xaf, 0xd1, 0x5f, 0x5d, 0x98, 0x8c, 0x3c, 0x94, 0xd5, 0xe0, 0x16, 0x54, 0x46, 0x22, 0x8a, + 0x42, 0x45, 0x6b, 0x85, 0x58, 0x16, 0xc3, 0x3e, 0xd4, 0x52, 0xdb, 0x31, 0x5a, 0x37, 0x9d, 0x24, + 0xcb, 0x9d, 0xcc, 0x3b, 0x98, 0xeb, 0x74, 0x44, 0xc9, 0x7f, 0xe4, 0x23, 0x45, 0xa1, 0xe3, 0xf4, + 0x6a, 0x79, 0xc4, 0x0c, 0xc3, 0x4f, 0x00, 0xb2, 0xd5, 0x61, 0x18, 0x2b, 0xda, 0x28, 0xe4, 0x2c, + 0xe0, 0x48, 0xa1, 0x3a, 0x12, 0xb1, 0xe2, 0x37, 0x8a, 0x36, 0xcd, 0xc5, 0xe6, 0xdb, 0xee, 0x0f, + 0x50, 0x3f, 0x64, 0xd2, 0xcf, 0xc6, 0x27, 0xef, 0xa0, 0xf3, 0xa0, 0x83, 0x14, 0xbc, 0x6b, 0xa1, + 0xf8, 0xe2, 0xbc, 0x6b, 0xa4, 0x50, 0x70, 0xf9, 0x61, 0xc1, 0xdd, 0x3f, 0x1d, 0xa8, 0xcf, 0xe6, + 0x15, 0x9f, 0x42, 0x45, 0x9f, 0x91, 0x29, 0x75, 0x3a, 0xe5, 0x9e, 0x37, 0xb4, 0x3b, 0xdc, 0x84, + 0xda, 0x84, 0x33, 0x19, 0x6b, 0xc6, 0x35, 0xcc, 0x6c, 0x8f, 0x2f, 0x60, 0x2d, 0x53, 0x5d, 0x88, + 0xa9, 0x0a, 0x44, 0x18, 0x07, 0xb4, 0x6c, 0x24, 0xad, 0x0c, 0xfe, 0xd6, 0xa2, 0xf8, 0x1c, 0x56, + 0xf3, 0x43, 0x17, 0xb1, 0xae, 0xd4, 0x33, 0xb2, 0x66, 0x0e, 0x9e, 0xf0, 0x1b, 0x85, 0xcf, 0x01, + 0xd8, 0x54, 0x89, 0x8b, 0x09, 0x67, 0xd7, 0xdc, 0x0c, 0x43, 0xde, 0xd0, 0xba, 0xc6, 0x8f, 0x35, + 0xdc, 0x7d, 0xeb, 0x00, 0x68, 0xd3, 0xfb, 0x57, 0x2c, 0x0e, 0xf4, 0x47, 0xe5, 0x86, 0xbe, 0xed, + 0x09, 0x68, 0xed, 0xfd, 0xfb, 0x67, 0xee, 0xd1, 0xc1, 0xd0, 0x0d, 0x7d, 0xfc, 0xc2, 0x8e, 0xb4, + 0x6b, 0x46, 0xfa, 0x69, 0xf1, 0x13, 0xcd, 0x4e, 0x3f, 0x98, 0xea, 0x17, 0x50, 0x8d, 0x85, 0xcf, + 0x2f, 0x42, 0xdf, 0x36, 0xac, 0x65, 0x43, 0x56, 0x4e, 0x84, 0xcf, 0x8f, 0x0e, 0x86, 0x15, 0x4d, + 0x1f, 0xf9, 0xc5, 0x3b, 0xf3, 0x16, 0xef, 0x2c, 0x02, 0x32, 0x4f, 0x70, 0x1a, 0xc6, 0xc1, 0x84, + 0xcf, 0x8c, 0x38, 0xff, 0xc5, 0x88, 0xfb, 0x31, 0x23, 0xdd, 0x3f, 0x1c, 0x68, 0xce, 0xe3, 0x9c, + 0xf7, 0x71, 0x0f, 0x40, 0x49, 0x16, 0xa7, 0xa1, 0x0a, 0x45, 0x6c, 0x33, 0x6e, 0x3d, 0x92, 0x71, + 0xa6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x82, 0xea, 0xc8, 0xa8, 0xb2, 0x1b, 0x2f, 0x3c, 0x29, + 0xcb, 0xa5, 0xe5, 0x5f, 0x98, 0x95, 0x17, 0xfb, 0x52, 0x5e, 0xe8, 0xcb, 0xf6, 0x21, 0xd4, 0x67, + 0xaf, 0x35, 0xae, 0x41, 0xc3, 0x6c, 0x4e, 0x84, 0x8c, 0xd8, 0x84, 0x94, 0xf0, 0x09, 0xac, 0x19, + 0x60, 0x1e, 0x9f, 0x38, 0xf8, 0x3f, 0x58, 0x5f, 0x02, 0xcf, 0xfb, 0xc4, 0xdd, 0xfe, 0xcb, 0x85, + 0x46, 0xe1, 0x59, 0x42, 0x80, 0xca, 0x20, 0x0d, 0x0e, 0xa7, 0x09, 0x29, 0x61, 0x03, 0xaa, 0x83, + 0x34, 0xd8, 0xe3, 0x4c, 0x11, 0xc7, 0x6e, 0x5e, 0x4b, 0x91, 0x10, 0xd7, 0xaa, 0x76, 0x93, 0x84, + 0x94, 0xb1, 0x05, 0x90, 0xad, 0x87, 0x3c, 0x4d, 0x88, 0x67, 0x85, 0xe7, 0x42, 0x71, 0xb2, 0xa2, + 0xbd, 0xd9, 0x8d, 0x61, 0x2b, 0x96, 0xd5, 0x4f, 0x00, 0xa9, 0x22, 0x81, 0xa6, 0x4e, 0xc6, 0x99, + 0x54, 0x97, 0x3a, 0x4b, 0x0d, 0x37, 0x80, 0x14, 0x11, 0x73, 0xa8, 0x8e, 0x08, 0xad, 0x41, 0x1a, + 
0xbc, 0x89, 0x25, 0x67, 0xa3, 0x2b, 0x76, 0x39, 0xe1, 0x04, 0x70, 0x1d, 0x56, 0x6d, 0x20, 0xfd, + 0xc5, 0x4d, 0x53, 0xd2, 0xb0, 0xb2, 0xfd, 0x2b, 0x3e, 0xfa, 0xe9, 0xbb, 0xa9, 0x90, 0xd3, 0x88, + 0x34, 0x75, 0xd9, 0x83, 0x34, 0x30, 0x17, 0x34, 0xe6, 0xf2, 0x98, 0x33, 0x9f, 0x4b, 0xb2, 0x6a, + 0x4f, 0x9f, 0x85, 0x11, 0x17, 0x53, 0x75, 0x22, 0x7e, 0x26, 0x2d, 0x6b, 0x66, 0xc8, 0x99, 0x6f, + 0x7e, 0x61, 0x64, 0xcd, 0x9a, 0x99, 0x21, 0xc6, 0x0c, 0xb1, 0xf5, 0xbe, 0x96, 0xdc, 0x94, 0xb8, + 0x6e, 0xb3, 0xda, 0xbd, 0xd1, 0xe0, 0xf6, 0x6f, 0x0e, 0x6c, 0x3c, 0x36, 0x1e, 0xb8, 0x05, 0xf4, + 0x31, 0x7c, 0x77, 0xaa, 0x04, 0x29, 0xe1, 0xa7, 0xf0, 0xff, 0xc7, 0xd8, 0x6f, 0x44, 0x18, 0xab, + 0xa3, 0x28, 0x99, 0x84, 0xa3, 0x50, 0x5f, 0xc5, 0xc7, 0x64, 0xaf, 0x6e, 0xac, 0xcc, 0xdd, 0xbe, + 0x85, 0xd6, 0xe2, 0x47, 0xa1, 0x9b, 0x31, 0x47, 0x76, 0x7d, 0x5f, 0x8f, 0x3f, 0x29, 0x21, 0x2d, + 0x9a, 0x1d, 0xf2, 0x48, 0x5c, 0x73, 0xc3, 0x38, 0x8b, 0xcc, 0x9b, 0xc4, 0x67, 0x2a, 0x63, 0xdc, + 0xc5, 0x42, 0x76, 0x7d, 0xff, 0x38, 0x7b, 0x7b, 0x0c, 0x5b, 0xde, 0xa3, 0x77, 0x1f, 0xda, 0xa5, + 0x77, 0x1f, 0xda, 0xa5, 0xbb, 0xfb, 0xb6, 0xf3, 0xee, 0xbe, 0xed, 0xfc, 0x7d, 0xdf, 0x76, 0x7e, + 0xff, 0xa7, 0x5d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x87, 0x11, 0x6d, 0xd6, 0xaf, 0x08, 0x00, + 0x00, +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto new file mode 100644 index 00000000000..23d62ec2fb0 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto @@ -0,0 +1,177 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + 
optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit = 2; +} + +message ConfState { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + repeated uint64 voters = 1; + // The learners in the incoming config. + repeated uint64 learners = 2; + // The voters in the outgoing config. + repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + optional bool auto_leave = 5 [(gogoproto.nullable) = false]; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional bytes context = 4; + + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + optional ConfChangeType type = 1 [(gogoproto.nullable) = false]; + optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. 
+// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; + repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; + optional bytes context = 3; +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/rawnode.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/rawnode.go similarity index 60% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/rawnode.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/rawnode.go index 925cb851c4a..90eb69493c6 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/rawnode.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/rawnode.go @@ -17,7 +17,8 @@ package raft import ( "errors" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) // ErrStepLocalMsg is returned when try to step a local raft message @@ -36,85 +37,20 @@ type RawNode struct { prevHardSt pb.HardState } -func (rn *RawNode) newReady() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -func (rn *RawNode) commitReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - if rn.prevHardSt.Commit != 0 { - // In most cases, prevHardSt and rd.HardState will be the same - // because when there are new entries to apply we just sent a - // HardState with an updated Commit value. However, on initial - // startup the two are different because we don't send a HardState - // until something changes, but we do send any un-applied but - // committed entries (and previously-committed entries may be - // incorporated into the snapshot, even if rd.CommittedEntries is - // empty). Therefore we mark all committed entries as applied - // whether they were included in rd.HardState or not. 
- rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) - } - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - rn.raft.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } - if len(rd.ReadStates) != 0 { - rn.raft.readStates = nil - } -} - -// NewRawNode returns a new RawNode given configuration and a list of raft peers. -func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { - if config.ID == 0 { - panic("config.ID must not be zero") - } +// NewRawNode instantiates a RawNode from the given configuration. +// +// See Bootstrap() for bootstrapping an initial state; this replaces the former +// 'peers' argument to this method (with identical behavior). However, It is +// recommended that instead of calling Bootstrap, applications bootstrap their +// state manually by setting up a Storage that has a first index > 1 and which +// stores the desired ConfState as its InitialState. +func NewRawNode(config *Config) (*RawNode, error) { r := newRaft(config) rn := &RawNode{ raft: r, } - lastIndex, err := config.Storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - // If the log is empty, this is a new RawNode (like StartNode); otherwise it's - // restoring an existing RawNode (like RestartNode). - // TODO(bdarnell): rethink RawNode initialization and whether the application needs - // to be able to tell us when it expects the RawNode to exist. - if lastIndex == 0 { - r.becomeFollower(1, None) - ents := make([]pb.Entry, len(peers)) - for i, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - data, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - - ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} - } - r.raftLog.append(ents...) - r.raftLog.committed = uint64(len(ents)) - for _, peer := range peers { - r.addNode(peer.ID) - } - } - - // Set the initial hard and soft states after performing all initialization. rn.prevSoftSt = r.softState() - if lastIndex == 0 { - rn.prevHardSt = emptyState - } else { - rn.prevHardSt = r.hardState() - } - + rn.prevHardSt = r.hardState() return rn, nil } @@ -152,39 +88,20 @@ func (rn *RawNode) Propose(data []byte) error { }}) } -// ProposeConfChange proposes a config change. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { - data, err := cc.Marshal() +// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for +// details. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { + m, err := confChangeToMsg(cc) if err != nil { return err } - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - Entries: []pb.Entry{ - {Type: pb.EntryConfChange, Data: data}, - }, - }) + return rn.raft.Step(m) } // ApplyConfChange applies a config change to the local node. 
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - if cc.NodeID == None { - rn.raft.resetPendingConf() - return &pb.ConfState{Nodes: rn.raft.nodes()} - } - switch cc.Type { - case pb.ConfChangeAddNode: - rn.raft.addNode(cc.NodeID) - case pb.ConfChangeAddLearnerNode: - rn.raft.addLearner(cc.NodeID) - case pb.ConfChangeRemoveNode: - rn.raft.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - rn.raft.resetPendingConf() - default: - panic("unexpected conf type") - } - return &pb.ConfState{Nodes: rn.raft.nodes()} +func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + cs := rn.raft.applyConfChange(cc.AsV2()) + return &cs } // Step advances the state machine using the given message. @@ -193,19 +110,41 @@ func (rn *RawNode) Step(m pb.Message) error { if IsLocalMsg(m.Type) { return ErrStepLocalMsg } - if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { + if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { return rn.raft.Step(m) } return ErrStepPeerNotFound } -// Ready returns the current point-in-time state of this RawNode. +// Ready returns the outstanding work that the application needs to handle. This +// includes appending and applying entries or a snapshot, updating the HardState, +// and sending messages. The returned Ready() *must* be handled and subsequently +// passed back via Advance(). func (rn *RawNode) Ready() Ready { - rd := rn.newReady() - rn.raft.msgs = nil + rd := rn.readyWithoutAccept() + rn.acceptReady(rd) return rd } +// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there +// is no obligation that the Ready must be handled. +func (rn *RawNode) readyWithoutAccept() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +// acceptReady is called when the consumer of the RawNode has decided to go +// ahead and handle a Ready. Nothing must alter the state of the RawNode between +// this call and the prior call to Ready(). +func (rn *RawNode) acceptReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } + rn.raft.msgs = nil +} + // HasReady called when RawNode user need to check if any Ready pending. // Checking logic in this method should be consistent with Ready.containsUpdates(). func (rn *RawNode) HasReady() bool { @@ -231,13 +170,47 @@ func (rn *RawNode) HasReady() bool { // Advance notifies the RawNode that the application has applied and saved progress in the // last Ready results. func (rn *RawNode) Advance(rd Ready) { - rn.commitReady(rd) + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + rn.raft.advance(rd) } -// Status returns the current status of the given group. -func (rn *RawNode) Status() *Status { +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { status := getStatus(rn.raft) - return &status + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. +func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. 
+ ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this node and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) } // ReportUnreachable reports the given node is not reachable for the last send. diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/read_only.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/read_only.go similarity index 79% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/read_only.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/read_only.go index ae746fa73eb..6987f1bd7d7 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/read_only.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/read_only.go @@ -14,7 +14,7 @@ package raft -import pb "github.com/coreos/etcd/raft/raftpb" +import pb "go.etcd.io/etcd/raft/raftpb" // ReadState provides state for read only query. // It's caller's responsibility to call ReadIndex first before getting @@ -29,7 +29,11 @@ type ReadState struct { type readIndexStatus struct { req pb.Message index uint64 - acks map[uint64]struct{} + // NB: this never records 'false', but it's more convenient to use this + // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If + // this becomes performance sensitive enough (doubtful), quorum.VoteResult + // can change to an API that is closer to that of CommittedIndex. + acks map[uint64]bool } type readOnly struct { @@ -50,26 +54,25 @@ func newReadOnly(option ReadOnlyOption) *readOnly { // the read only request. // `m` is the original read only request message from the local or remote node. func (ro *readOnly) addRequest(index uint64, m pb.Message) { - ctx := string(m.Entries[0].Data) - if _, ok := ro.pendingReadIndex[ctx]; ok { + s := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[s]; ok { return } - ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})} - ro.readIndexQueue = append(ro.readIndexQueue, ctx) + ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)} + ro.readIndexQueue = append(ro.readIndexQueue, s) } // recvAck notifies the readonly struct that the raft state machine received // an acknowledgment of the heartbeat that attached with the read only request // context. -func (ro *readOnly) recvAck(m pb.Message) int { - rs, ok := ro.pendingReadIndex[string(m.Context)] +func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool { + rs, ok := ro.pendingReadIndex[string(context)] if !ok { - return 0 + return nil } - rs.acks[m.From] = struct{}{} - // add one to include an ack from local node - return len(rs.acks) + 1 + rs.acks[id] = true + return rs.acks } // advance advances the read only request queue kept by the readonly struct. 
diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/status.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/status.go similarity index 68% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/status.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/status.go index f4d3d86a4e3..adc60486d9c 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/status.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/status.go @@ -17,44 +17,62 @@ package raft import ( "fmt" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) +// Status contains information about this Raft peer and its view of the system. +// The Progress is only populated on the leader. type Status struct { + BasicStatus + Config tracker.Config + Progress map[uint64]tracker.Progress +} + +// BasicStatus contains basic information about the Raft peer. It does not allocate. +type BasicStatus struct { ID uint64 pb.HardState SoftState - Applied uint64 - Progress map[uint64]Progress + Applied uint64 LeadTransferee uint64 } -// getStatus gets a copy of the current raft status. -func getStatus(r *raft) Status { - s := Status{ +func getProgressCopy(r *raft) map[uint64]tracker.Progress { + m := make(map[uint64]tracker.Progress) + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + var p tracker.Progress + p = *pr + p.Inflights = pr.Inflights.Clone() + pr = nil + + m[id] = p + }) + return m +} + +func getBasicStatus(r *raft) BasicStatus { + s := BasicStatus{ ID: r.id, LeadTransferee: r.leadTransferee, } - s.HardState = r.hardState() s.SoftState = *r.softState() - s.Applied = r.raftLog.applied + return s +} +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + var s Status + s.BasicStatus = getBasicStatus(r) if s.RaftState == StateLeader { - s.Progress = make(map[uint64]Progress) - for id, p := range r.prs { - s.Progress[id] = *p - } - - for id, p := range r.learnerPrs { - s.Progress[id] = *p - } + s.Progress = getProgressCopy(r) } - + s.Config = r.prs.Config.Clone() return s } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/storage.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/storage.go similarity index 98% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/raft/storage.go rename to cluster-autoscaler/vendor/go.etcd.io/etcd/raft/storage.go index 69c3a7d9033..6be574590e0 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/raft/storage.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/storage.go @@ -18,7 +18,7 @@ import ( "errors" "sync" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) // ErrCompacted is returned by Storage.Entries/Compact when a requested @@ -44,6 +44,8 @@ var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unav // become inoperable and refuse to participate in elections; the // application is responsible for cleanup and recovery in this case. type Storage interface { + // TODO(tbg): split this into two interfaces, LogStorage and StateStorage. + // InitialState returns the saved HardState and ConfState information. InitialState() (pb.HardState, pb.ConfState, error) // Entries returns a slice of log entries in the range [lo,hi). 
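The rawnode.go hunk above replaces the old newReady/commitReady pair with an explicit contract: Ready() returns and accepts the outstanding work, and the application must hand the same Ready back through Advance() once it has been persisted, sent, and applied. A minimal, illustrative Go sketch of that loop (not itself part of this vendor update) follows; saveToStorage, send, and applyEntry are hypothetical application-owned callbacks, not symbols defined anywhere in this patch.

// Sketch only: one iteration of the Ready/Advance contract described above.
package raftexample

import (
	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

// handleReady drains one Ready from rn. The callbacks stand in for the
// application's storage, transport, and state machine (hypothetical names).
func handleReady(
	rn *raft.RawNode,
	saveToStorage func(hs pb.HardState, ents []pb.Entry, snap pb.Snapshot),
	send func(msgs []pb.Message),
	applyEntry func(ent pb.Entry),
) {
	if !rn.HasReady() {
		return
	}
	// Ready() both returns and accepts the outstanding work; the RawNode
	// must not be stepped or ticked again until Advance(rd) is called.
	rd := rn.Ready()
	saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) // persist before sending
	send(rd.Messages)
	for _, ent := range rd.CommittedEntries {
		applyEntry(ent) // apply committed entries to the state machine
	}
	rn.Advance(rd)
}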
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/inflights.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/inflights.go new file mode 100644 index 00000000000..1a056341ab5 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/inflights.go @@ -0,0 +1,132 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// Inflights limits the number of MsgApp (represented by the largest index +// contained within) sent to followers but not yet acknowledged by them. Callers +// use Full() to check whether more messages can be sent, call Add() whenever +// they are sending a new append, and release "quota" via FreeLE() whenever an +// ack is received. +type Inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +// NewInflights sets up an Inflights that allows up to 'size' inflight messages. +func NewInflights(size int) *Inflights { + return &Inflights{ + size: size, + } +} + +// Clone returns an *Inflights that is identical to but shares no memory with +// the receiver. +func (in *Inflights) Clone() *Inflights { + ins := *in + ins.buffer = append([]uint64(nil), in.buffer...) + return &ins +} + +// Add notifies the Inflights that a new message with the given index is being +// dispatched. Full() must be called prior to Add() to verify that there is room +// for one more message, and consecutive calls to add Add() must provide a +// monotonic sequence of indexes. +func (in *Inflights) Add(inflight uint64) { + if in.Full() { + panic("cannot add into a Full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.grow() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *Inflights) grow() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// FreeLE frees the inflights smaller or equal to the given `to` flight. 
+func (in *Inflights) FreeLE(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + idx := in.start + var i int + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +// FreeFirstOne releases the first inflight. This is a no-op if nothing is +// inflight. +func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } + +// Full returns true if no more messages can be sent at the moment. +func (in *Inflights) Full() bool { + return in.count == in.size +} + +// Count returns the number of inflight messages. +func (in *Inflights) Count() int { return in.count } + +// reset frees all inflights. +func (in *Inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/progress.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/progress.go new file mode 100644 index 00000000000..62c81f45af8 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/progress.go @@ -0,0 +1,259 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" +) + +// Progress represents a follower’s progress in the view of the leader. Leader +// maintains progresses of all followers, and sends entries to the follower +// based on its progress. +// +// NB(tbg): Progress is basically a state machine whose transitions are mostly +// strewn around `*raft.raft`. Additionally, some fields are only used when in a +// certain State. All of this isn't ideal. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in StateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in StateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in StateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State StateType + + // PendingSnapshot is used in StateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. 
Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + // + // TODO(tbg): the leader should always have this set to true. + RecentActive bool + + // ProbeSent is used while this follower is in StateProbe. When ProbeSent is + // true, raft should pause sending replication message to this peer until + // ProbeSent is reset. See ProbeAcked() and IsPaused(). + ProbeSent bool + + // Inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is Full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. + // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.FreeLE with the index of the last + // received entry. + Inflights *Inflights + + // IsLearner is true if this progress is tracked for a learner. + IsLearner bool +} + +// ResetState moves the Progress into the specified State, resetting ProbeSent, +// PendingSnapshot, and Inflights. +func (pr *Progress) ResetState(state StateType) { + pr.ProbeSent = false + pr.PendingSnapshot = 0 + pr.State = state + pr.Inflights.reset() +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +// ProbeAcked is called when this peer has accepted an append. It resets +// ProbeSent to signal that additional append messages should be sent without +// further delay. +func (pr *Progress) ProbeAcked() { + pr.ProbeSent = false +} + +// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or, +// optionally and if larger, the index of the pending snapshot. +func (pr *Progress) BecomeProbe() { + // If the original state is StateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == StateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.ResetState(StateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.ResetState(StateProbe) + pr.Next = pr.Match + 1 + } +} + +// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1. +func (pr *Progress) BecomeReplicate() { + pr.ResetState(StateReplicate) + pr.Next = pr.Match + 1 +} + +// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending +// snapshot index. +func (pr *Progress) BecomeSnapshot(snapshoti uint64) { + pr.ResetState(StateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the +// index acked by it. The method returns false if the given n index comes from +// an outdated message. Otherwise it updates the progress and returns true. +func (pr *Progress) MaybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.ProbeAcked() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +// OptimisticUpdate signals that appends all the way up to and including index n +// are in-flight. As a result, Next is increased to n+1. 
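A hedged sketch of the optimistic pipeline described above, using hypothetical leader-side helpers (onSendAppend and onAppendResp are not etcd names): bump Next and occupy an inflight slot when an append goes out, then settle Match and free the window when the acknowledgement returns.

package raftsketch

import "go.etcd.io/etcd/raft/tracker"

// onSendAppend is called after the leader emits a MsgApp whose last entry has
// index lastIndex (typically only while pr.State == StateReplicate).
func onSendAppend(pr *tracker.Progress, lastIndex uint64) {
	pr.OptimisticUpdate(lastIndex) // Next = lastIndex + 1
	pr.Inflights.Add(lastIndex)    // occupy one slot of the sliding window
}

// onAppendResp is called when the matching MsgAppResp acknowledges ackedIndex.
func onAppendResp(pr *tracker.Progress, ackedIndex uint64) {
	if pr.MaybeUpdate(ackedIndex) { // advances Match and clears ProbeSent
		pr.Inflights.FreeLE(ackedIndex) // release window slots up to the ack
	}
}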
+func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 } + +// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The +// arguments are the index the follower rejected to append to its log, and its +// last index. +// +// Rejections can happen spuriously as messages are sent out of order or +// duplicated. In such cases, the rejection pertains to an index that the +// Progress already knows were previously acknowledged, and false is returned +// without changing the Progress. +// +// If the rejection is genuine, Next is lowered sensibly, and the Progress is +// cleared for sending log entries. +func (pr *Progress) MaybeDecrTo(rejected, last uint64) bool { + if pr.State == StateReplicate { + // The rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // Directly decrease next to match + 1. + // + // TODO(tbg): why not use last if it's larger? + pr.Next = pr.Match + 1 + return true + } + + // The rejection must be stale if "rejected" does not match next - 1. This + // is because non-replicating followers are probed one entry at a time. + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.ProbeSent = false + return true +} + +// IsPaused returns whether sending log entries to this node has been throttled. +// This is done when a node has rejected recent MsgApps, is currently waiting +// for a snapshot, or has reached the MaxInflightMsgs limit. In normal +// operation, this is false. A throttled node will be contacted less frequently +// until it has reached a state in which it's able to accept a steady stream of +// log entries again. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case StateProbe: + return pr.ProbeSent + case StateReplicate: + return pr.Inflights.Full() + case StateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next) + if pr.IsLearner { + fmt.Fprint(&buf, " learner") + } + if pr.IsPaused() { + fmt.Fprint(&buf, " paused") + } + if pr.PendingSnapshot > 0 { + fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot) + } + if !pr.RecentActive { + fmt.Fprintf(&buf, " inactive") + } + if n := pr.Inflights.Count(); n > 0 { + fmt.Fprintf(&buf, " inflight=%d", n) + if pr.Inflights.Full() { + fmt.Fprint(&buf, "[full]") + } + } + return buf.String() +} + +// ProgressMap is a map of *Progress. +type ProgressMap map[uint64]*Progress + +// String prints the ProgressMap in sorted key order, one Progress per line. +func (m ProgressMap) String() string { + ids := make([]uint64, 0, len(m)) + for k := range m { + ids = append(ids, k) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + var buf strings.Builder + for _, id := range ids { + fmt.Fprintf(&buf, "%d: %s\n", id, m[id]) + } + return buf.String() +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/state.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/state.go new file mode 100644 index 00000000000..285b4b8f580 --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/state.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// StateType is the state of a tracked follower. +type StateType uint64 + +const ( + // StateProbe indicates a follower whose last index isn't known. Such a + // follower is "probed" (i.e. an append sent periodically) to narrow down + // its last index. In the ideal (and common) case, only one round of probing + // is necessary as the follower will react with a hint. Followers that are + // probed over extended periods of time are often offline. + StateProbe StateType = iota + // StateReplicate is the state steady in which a follower eagerly receives + // log entries to append to its log. + StateReplicate + // StateSnapshot indicates a follower that needs log entries not available + // from the leader's Raft log. Such a follower needs a full snapshot to + // return to StateReplicate. + StateSnapshot +) + +var prstmap = [...]string{ + "StateProbe", + "StateReplicate", + "StateSnapshot", +} + +func (st StateType) String() string { return prstmap[uint64(st)] } diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/tracker.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/tracker.go new file mode 100644 index 00000000000..a4581143d1e --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/tracker/tracker.go @@ -0,0 +1,288 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Config reflects the configuration tracked in a ProgressTracker. +type Config struct { + Voters quorum.JointConfig + // AutoLeave is true if the configuration is joint and a transition to the + // incoming configuration should be carried out automatically by Raft when + // this is possible. If false, the configuration will be joint until the + // application initiates the transition manually. + AutoLeave bool + // Learners is a set of IDs corresponding to the learners active in the + // current configuration. + // + // Invariant: Learners and Voters does not intersect, i.e. if a peer is in + // either half of the joint config, it can't be a learner; if it is a + // learner it can't be in either half of the joint config. This invariant + // simplifies the implementation since it allows peers to have clarity about + // its current role without taking into account joint consensus. + Learners map[uint64]struct{} + // When we turn a voter into a learner during a joint consensus transition, + // we cannot add the learner directly when entering the joint state. 
This is + // because this would violate the invariant that the intersection of + // voters and learners is empty. For example, assume a Voter is removed and + // immediately re-added as a learner (or in other words, it is demoted): + // + // Initially, the configuration will be + // + // voters: {1 2 3} + // learners: {} + // + // and we want to demote 3. Entering the joint configuration, we naively get + // + // voters: {1 2} & {1 2 3} + // learners: {3} + // + // but this violates the invariant (3 is both voter and learner). Instead, + // we get + // + // voters: {1 2} & {1 2 3} + // learners: {} + // next_learners: {3} + // + // Where 3 is now still purely a voter, but we are remembering the intention + // to make it a learner upon transitioning into the final configuration: + // + // voters: {1 2} + // learners: {3} + // next_learners: {} + // + // Note that next_learners is not used while adding a learner that is not + // also a voter in the joint config. In this case, the learner is added + // right away when entering the joint configuration, so that it is caught up + // as soon as possible. + LearnersNext map[uint64]struct{} +} + +func (c Config) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "voters=%s", c.Voters) + if c.Learners != nil { + fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String()) + } + if c.LearnersNext != nil { + fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) + } + if c.AutoLeave { + fmt.Fprintf(&buf, " autoleave") + } + return buf.String() +} + +// Clone returns a copy of the Config that shares no memory with the original. +func (c *Config) Clone() Config { + clone := func(m map[uint64]struct{}) map[uint64]struct{} { + if m == nil { + return nil + } + mm := make(map[uint64]struct{}, len(m)) + for k := range m { + mm[k] = struct{}{} + } + return mm + } + return Config{ + Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])}, + Learners: clone(c.Learners), + LearnersNext: clone(c.LearnersNext), + } +} + +// ProgressTracker tracks the currently active configuration and the information +// known about the nodes and learners in it. In particular, it tracks the match +// index for each peer which in turn allows reasoning about the committed index. +type ProgressTracker struct { + Config + + Progress ProgressMap + + Votes map[uint64]bool + + MaxInflight int +} + +// MakeProgressTracker initializes a ProgressTracker. +func MakeProgressTracker(maxInflight int) ProgressTracker { + p := ProgressTracker{ + MaxInflight: maxInflight, + Config: Config{ + Voters: quorum.JointConfig{ + quorum.MajorityConfig{}, + nil, // only populated when used + }, + Learners: nil, // only populated when used + LearnersNext: nil, // only populated when used + }, + Votes: map[uint64]bool{}, + Progress: map[uint64]*Progress{}, + } + return p +} + +// ConfState returns a ConfState representing the active configuration. +func (p *ProgressTracker) ConfState() pb.ConfState { + return pb.ConfState{ + Voters: p.Voters[0].Slice(), + VotersOutgoing: p.Voters[1].Slice(), + Learners: quorum.MajorityConfig(p.Learners).Slice(), + LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(), + AutoLeave: p.AutoLeave, + } +} + +// IsSingleton returns true if (and only if) there is only one voting member +// (i.e. the leader) in the current configuration. 
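The voter-demotion walkthrough above can be spelled out as a literal Config value. This is an illustrative sketch that assumes quorum.MajorityConfig is the map type used elsewhere in this file; the printed form is only approximate.

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/quorum"
	"go.etcd.io/etcd/raft/tracker"
)

func main() {
	// Demoting voter 3: during the joint phase it stays a voter on the
	// outgoing side and is only remembered in LearnersNext.
	c := tracker.Config{
		Voters: quorum.JointConfig{
			quorum.MajorityConfig{1: {}, 2: {}},        // incoming voters
			quorum.MajorityConfig{1: {}, 2: {}, 3: {}}, // outgoing voters
		},
		LearnersNext: map[uint64]struct{}{3: {}},
	}
	fmt.Println(c.String()) // roughly: voters=(1 2)&&(1 2 3) learners_next=(3)
}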
+func (p *ProgressTracker) IsSingleton() bool { + return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0 +} + +type matchAckIndexer map[uint64]*Progress + +var _ quorum.AckedIndexer = matchAckIndexer(nil) + +// AckedIndex implements IndexLookuper. +func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) { + pr, ok := l[id] + if !ok { + return 0, false + } + return quorum.Index(pr.Match), true +} + +// Committed returns the largest log index known to be committed based on what +// the voting members of the group have acknowledged. +func (p *ProgressTracker) Committed() uint64 { + return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress))) +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// Visit invokes the supplied closure for all tracked progresses in stable order. +func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) { + n := len(p.Progress) + // We need to sort the IDs and don't want to allocate since this is hot code. + // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`, + // see there for details. + var sl [7]uint64 + ids := sl[:] + if len(sl) >= n { + ids = sl[:n] + } else { + ids = make([]uint64, n) + } + for id := range p.Progress { + n-- + ids[n] = id + } + insertionSort(ids) + for _, id := range ids { + f(id, p.Progress[id]) + } +} + +// QuorumActive returns true if the quorum is active from the view of the local +// raft state machine. Otherwise, it returns false. +func (p *ProgressTracker) QuorumActive() bool { + votes := map[uint64]bool{} + p.Visit(func(id uint64, pr *Progress) { + if pr.IsLearner { + return + } + votes[id] = pr.RecentActive + }) + + return p.Voters.VoteResult(votes) == quorum.VoteWon +} + +// VoterNodes returns a sorted slice of voters. +func (p *ProgressTracker) VoterNodes() []uint64 { + m := p.Voters.IDs() + nodes := make([]uint64, 0, len(m)) + for id := range m { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// LearnerNodes returns a sorted slice of learners. +func (p *ProgressTracker) LearnerNodes() []uint64 { + if len(p.Learners) == 0 { + return nil + } + nodes := make([]uint64, 0, len(p.Learners)) + for id := range p.Learners { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// ResetVotes prepares for a new round of vote counting via recordVote. +func (p *ProgressTracker) ResetVotes() { + p.Votes = map[uint64]bool{} +} + +// RecordVote records that the node with the given id voted for this Raft +// instance if v == true (and declined it otherwise). +func (p *ProgressTracker) RecordVote(id uint64, v bool) { + _, ok := p.Votes[id] + if !ok { + p.Votes[id] = v + } +} + +// TallyVotes returns the number of granted and rejected Votes, and whether the +// election outcome is known. +func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) { + // Make sure to populate granted/rejected correctly even if the Votes slice + // contains members no longer part of the configuration. This doesn't really + // matter in the way the numbers are used (they're informational), but might + // as well get it right. 
+ for id, pr := range p.Progress { + if pr.IsLearner { + continue + } + v, voted := p.Votes[id] + if !voted { + continue + } + if v { + granted++ + } else { + rejected++ + } + } + result := p.Voters.VoteResult(p.Votes) + return granted, rejected, result +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/util.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/util.go new file mode 100644 index 00000000000..785cf735d5d --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/raft/util.go @@ -0,0 +1,233 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "fmt" + "strings" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. 
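voteRespMsgType is package-internal, so an external sketch can only exercise the exported classifiers; a minimal illustration of how they partition message types, using values from raftpb:

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	fmt.Println(raft.IsLocalMsg(pb.MsgHup))        // true: never leaves the node
	fmt.Println(raft.IsLocalMsg(pb.MsgApp))        // false: sent over the wire
	fmt.Println(raft.IsResponseMsg(pb.MsgAppResp)) // true: a reply-type message
}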
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +func DescribeHardState(hs pb.HardState) string { + var buf strings.Builder + fmt.Fprintf(&buf, "Term:%d", hs.Term) + if hs.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", hs.Vote) + } + fmt.Fprintf(&buf, " Commit:%d", hs.Commit) + return buf.String() +} + +func DescribeSoftState(ss SoftState) string { + return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState) +} + +func DescribeConfState(state pb.ConfState) string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave, + ) +} + +func DescribeSnapshot(snap pb.Snapshot) string { + m := snap.Metadata + return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState)) +} + +func DescribeReady(rd Ready, f EntryFormatter) string { + var buf strings.Builder + if rd.SoftState != nil { + fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState)) + buf.WriteByte('\n') + } + if !IsEmptyHardState(rd.HardState) { + fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState)) + buf.WriteByte('\n') + } + if len(rd.ReadStates) > 0 { + fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates) + } + if len(rd.Entries) > 0 { + buf.WriteString("Entries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.Entries, f)) + } + if !IsEmptySnap(rd.Snapshot) { + fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot)) + } + if len(rd.CommittedEntries) > 0 { + buf.WriteString("CommittedEntries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f)) + } + if len(rd.Messages) > 0 { + buf.WriteString("Messages:\n") + for _, msg := range rd.Messages { + fmt.Fprint(&buf, DescribeMessage(msg, f)) + buf.WriteByte('\n') + } + } + if buf.Len() > 0 { + return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String()) + } + return "" +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. +func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + } + return buf.String() +} + +// PayloadSize is the size of the payload of this Entry. Notably, it does not +// depend on its Index or Term. +func PayloadSize(e pb.Entry) int { + return len(e.Data) +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. 
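A small sketch of the Describe helpers with and without a custom EntryFormatter; the payloads are made up and the quoted output shape is approximate.

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	ents := []pb.Entry{
		{Term: 1, Index: 4, Type: pb.EntryNormal, Data: []byte("put k=v")},
		{Term: 1, Index: 5, Type: pb.EntryNormal},
	}

	// A nil EntryFormatter renders the payload with %q, giving lines such as
	//   1/4 EntryNormal "put k=v"
	fmt.Print(raft.DescribeEntries(ents, nil))

	// A custom formatter can decode application-specific payloads instead.
	fmt.Println(raft.DescribeEntry(ents[0], func(data []byte) string {
		return fmt.Sprintf("%d byte payload", len(data))
	}))
}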
+func DescribeEntry(e pb.Entry, f EntryFormatter) string { + if f == nil { + f = func(data []byte) string { return fmt.Sprintf("%q", data) } + } + + formatConfChange := func(cc pb.ConfChangeI) string { + // TODO(tbg): give the EntryFormatter a type argument so that it gets + // a chance to expose the Context. + return pb.ConfChangesToString(cc.AsV2().Changes) + } + + var formatted string + switch e.Type { + case pb.EntryNormal: + formatted = f(e.Data) + case pb.EntryConfChange: + var cc pb.ConfChange + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + case pb.EntryConfChangeV2: + var cc pb.ConfChangeV2 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + } + if formatted != "" { + formatted = " " + formatted + } + return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted) +} + +// DescribeEntries calls DescribeEntry for each Entry, adding a newline to +// each. +func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { + var buf bytes.Buffer + for _, e := range ents { + _, _ = buf.WriteString(DescribeEntry(e, f) + "\n") + } + return buf.String() +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} + +func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { + err := cs1.Equivalent(cs2) + if err == nil { + return + } + l.Panic(err) +} diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml b/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml index 762d22c9727..58957222a33 100644 --- a/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml +++ b/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml @@ -3,11 +3,9 @@ language: go go_import_path: go.uber.org/atomic go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.x # latest release + - 1.7 + - 1.8 + - 1.9 cache: directories: diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/README.md b/cluster-autoscaler/vendor/go.uber.org/atomic/README.md index a871d2b5f59..6505abf65cb 100644 --- a/cluster-autoscaler/vendor/go.uber.org/atomic/README.md +++ b/cluster-autoscaler/vendor/go.uber.org/atomic/README.md @@ -23,7 +23,7 @@ See the [documentation][doc] for a complete API specification. ## Development Status Stable. -___ +
Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml b/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml index fc3936befdd..5ffa8fed485 100644 --- a/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml +++ b/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml @@ -9,7 +9,7 @@ env: go: - 1.7 - 1.8 - - 1.9 + - tip cache: directories: diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/error.go b/cluster-autoscaler/vendor/go.uber.org/multierr/error.go index 150fd95d91a..de6ce4736c8 100644 --- a/cluster-autoscaler/vendor/go.uber.org/multierr/error.go +++ b/cluster-autoscaler/vendor/go.uber.org/multierr/error.go @@ -33,7 +33,7 @@ // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Combine(reader.Close(), writer.Close()) // // This makes it possible to record resource cleanup failures from deferred // blocks with the help of named return values. diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/.travis.yml b/cluster-autoscaler/vendor/go.uber.org/zap/.travis.yml index a3321fa2dc0..ada5ebdcc9c 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/.travis.yml +++ b/cluster-autoscaler/vendor/go.uber.org/zap/.travis.yml @@ -1,8 +1,8 @@ language: go sudo: false go: - - 1.9.x - - 1.10.x + - 1.11.x + - 1.12.x go_import_path: go.uber.org/zap env: global: diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md b/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md index 17d5b49f339..28d10677eb6 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md +++ b/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + ## v1.9.1 (06 Aug 2018) Bugfixes: @@ -303,3 +320,8 @@ upgrade to the upcoming stable release. [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 [#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/Makefile b/cluster-autoscaler/vendor/go.uber.org/zap/Makefile index ef7893b3b03..073e9aa910a 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/Makefile +++ b/cluster-autoscaler/vendor/go.uber.org/zap/Makefile @@ -9,7 +9,7 @@ PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer int # stable release. 
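Since the multierr doc comment is touched here, a brief sketch of the deferred-Append pattern it refers to (recording a failed Close alongside the primary error via a named return); writeFile and the path are placeholders.

package main

import (
	"os"

	"go.uber.org/multierr"
)

func writeFile(path string, data []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		// A Close failure is appended to whatever error the body produced.
		err = multierr.Append(err, f.Close())
	}()

	_, err = f.Write(data)
	return err
}

func main() {
	_ = writeFile("/tmp/example.txt", []byte("hello"))
}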
GO_VERSION := $(shell go version | cut -d " " -f 3) GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 10 +LINTABLE_MINOR_VERSIONS := 12 ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) SHOULD_LINT := true endif @@ -45,7 +45,7 @@ ifdef SHOULD_LINT @echo "Installing test dependencies for vet..." @go test -i $(PKGS) @echo "Checking vet..." - @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) + @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log @echo "Checking lint..." @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) @echo "Checking for unresolved FIXMEs..." diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/global.go b/cluster-autoscaler/vendor/go.uber.org/zap/global.go index d02232e39fa..c1ac0507cd9 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/global.go +++ b/cluster-autoscaler/vendor/go.uber.org/zap/global.go @@ -31,7 +31,6 @@ import ( ) const ( - _stdLogDefaultDepth = 2 _loggerWriterDepth = 2 _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + "https://github.com/uber-go/zap/issues/new and reference this error: %v" diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/error.go b/cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go similarity index 56% rename from cluster-autoscaler/vendor/go.uber.org/atomic/error.go rename to cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go index 0489d19badb..6b5dbda8076 100644 --- a/cluster-autoscaler/vendor/go.uber.org/atomic/error.go +++ b/cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,38 +18,9 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +// See #682 for more information. +// +build go1.12 -// Error is an atomic type-safe wrapper around Value for errors -type Error struct{ v Value } +package zap -// errorHolder is non-nil holder for error object. -// atomic.Value panics on saving nil object, so err object needs to be -// wrapped with valid object first. -type errorHolder struct{ err error } - -// NewError creates new atomic error object -func NewError(err error) *Error { - e := &Error{} - if err != nil { - e.Store(err) - } - return e -} - -// Load atomically loads the wrapped error -func (e *Error) Load() error { - v := e.v.Load() - if v == nil { - return nil - } - - eh := v.(errorHolder) - return eh.err -} - -// Store atomically stores error. -// NOTE: a holder object is allocated on each Store call. -func (e *Error) Store(err error) { - e.v.Store(errorHolder{err: err}) -} +const _stdLogDefaultDepth = 1 diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go b/cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go new file mode 100644 index 00000000000..d3ab9af933e --- /dev/null +++ b/cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build !go1.12 + +package zap + +const _stdLogDefaultDepth = 2 diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/field.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/field.go index 6a5e33e2f79..ae772e4a170 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/field.go +++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/field.go @@ -160,7 +160,7 @@ func (f Field) AddTo(enc ObjectEncoder) { case NamespaceType: enc.OpenNamespace(f.Key) case StringerType: - enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + err = encodeStringer(f.Key, f.Interface, enc) case ErrorType: encodeError(f.Key, f.Interface.(error), enc) case SkipType: @@ -199,3 +199,14 @@ func addFields(enc ObjectEncoder, fields []Field) { fields[i].AddTo(enc) } } + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { + defer func() { + if v := recover(); v != nil { + err = fmt.Errorf("PANIC=%v", v) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return +} diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go index 2dc67d81e7e..9aec4eada31 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -137,6 +137,9 @@ func (enc *jsonEncoder) resetReflectBuf() { if enc.reflectBuf == nil { enc.reflectBuf = bufferpool.Get() enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + + // For consistency with our custom JSON encoder. 
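The line added below mirrors standard encoding/json behaviour; a standalone illustration of what SetEscapeHTML(false) changes (the literals are arbitrary):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var escaped, raw bytes.Buffer

	_ = json.NewEncoder(&escaped).Encode(map[string]string{"q": "a<b&c"})

	enc := json.NewEncoder(&raw)
	enc.SetEscapeHTML(false) // same call the zap reflect path now makes
	_ = enc.Encode(map[string]string{"q": "a<b&c"})

	fmt.Print(escaped.String()) // {"q":"a\u003cb\u0026c"}
	fmt.Print(raw.String())     // {"q":"a<b&c"}
}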
+ enc.reflectEnc.SetEscapeHTML(false) } else { enc.reflectBuf.Reset() } diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/memory_encoder.go index 6ef85b09c7f..dfead0829d6 100644 --- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -158,7 +158,7 @@ func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { } func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519.go index d6f683ba3f7..c7f8c7e64ec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -2,6 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// In Go 1.13, the ed25519 package was promoted to the standard library as +// crypto/ed25519, and this package became a wrapper for the standard library one. +// +// +build !go1.13 + // Package ed25519 implements the Ed25519 signature algorithm. See // https://ed25519.cr.yp.to/. // diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go new file mode 100644 index 00000000000..d1448d8d220 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go @@ -0,0 +1,73 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. 
+// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/pkcs12.go index eff9ad3a98f..55f7691d48d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/pkcs12.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -7,6 +7,9 @@ // This implementation is distilled from https://tools.ietf.org/html/rfc7292 // and referenced documents. It is intended for decoding P12/PFX-stored // certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. package pkcs12 import ( @@ -100,7 +103,7 @@ func unmarshal(in []byte, out interface{}) error { return nil } -// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { encodedPassword, err := bmpString(password) if err != nil { @@ -208,7 +211,7 @@ func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) // Decode extracts a certificate and private key from pfxData. This function // assumes that there is only one certificate and only one private key in the -// pfxData. +// pfxData; if there are more use ToPEM instead. 
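A hedged sketch of the two pkcs12 entry points now cross-referenced in the comments: Decode for a single key/certificate pair, ToPEM when the archive may contain more than one. The file name and password are placeholders.

package main

import (
	"encoding/pem"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12"
)

func main() {
	pfxData, err := ioutil.ReadFile("client.p12")
	if err != nil {
		log.Fatal(err)
	}

	// Exactly one key and one certificate: Decode is sufficient.
	if key, cert, err := pkcs12.Decode(pfxData, "password"); err == nil {
		_, _ = key, cert
		return
	}

	// Otherwise convert every safe bag to a PEM block and feed the result to
	// crypto/tls or crypto/x509.
	blocks, err := pkcs12.ToPEM(pfxData, "password")
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		_ = pem.EncodeToMemory(b)
	}
}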
func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { encodedPassword, err := bmpString(password) if err != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index 9a887598ff4..2f04ee5b5c2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -7,6 +7,7 @@ package terminal import ( "bytes" "io" + "strconv" "sync" "unicode/utf8" ) @@ -159,6 +160,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { return keyClearScreen, b[1:] case 23: // ^W return keyDeleteWord, b[1:] + case 14: // ^N + return keyDown, b[1:] + case 16: // ^P + return keyUp, b[1:] } } @@ -267,34 +272,44 @@ func (t *Terminal) moveCursorToPos(pos int) { } func (t *Terminal) move(up, down, left, right int) { - movement := make([]rune, 3*(up+down+left+right)) - m := movement - for i := 0; i < up; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'A' - m = m[3:] - } - for i := 0; i < down; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'B' - m = m[3:] - } - for i := 0; i < left; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'D' - m = m[3:] - } - for i := 0; i < right; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'C' - m = m[3:] - } - - t.queue(movement) + m := []rune{} + + // 1 unit up can be expressed as ^[[A or ^[A + // 5 units up can be expressed as ^[[5A + + if up == 1 { + m = append(m, keyEscape, '[', 'A') + } else if up > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(up))...) + m = append(m, 'A') + } + + if down == 1 { + m = append(m, keyEscape, '[', 'B') + } else if down > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(down))...) + m = append(m, 'B') + } + + if right == 1 { + m = append(m, keyEscape, '[', 'C') + } else if right > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(right))...) + m = append(m, 'C') + } + + if left == 1 { + m = append(m, keyEscape, '[', 'D') + } else if left > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(left))...) + m = append(m, 'D') + } + + t.queue(m) } func (t *Terminal) clearLineToRight() { diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util.go index 731c89a284a..39110408409 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. @@ -25,7 +25,7 @@ type State struct { termios unix.Termios } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. 
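The documentation change above applies to every platform-specific IsTerminal; a minimal usage sketch against the process's own stdout:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdout.Fd())
	if terminal.IsTerminal(fd) {
		fmt.Println("stdout is a terminal")
	} else {
		fmt.Println("stdout is redirected to a pipe or file")
	}
}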
func IsTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go new file mode 100644 index 00000000000..dfcd6278592 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go index 799f049f04e..9317ac7ede6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -21,7 +21,7 @@ import ( type State struct{} -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { return false } diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index 9e41b9f43f0..3d5f06a9f04 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -17,7 +17,7 @@ type State struct { termios unix.Termios } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { _, err := unix.IoctlGetTermio(fd, unix.TCGETA) return err == nil diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 8618955df73..5cfdf8f3f03 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -26,7 +26,7 @@ type State struct { mode uint32 } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { var st uint32 err := windows.GetConsoleMode(windows.Handle(fd), &st) @@ -64,13 +64,15 @@ func Restore(fd int, state *State) error { return windows.SetConsoleMode(windows.Handle(fd), state.mode) } -// GetSize returns the dimensions of the given terminal. +// GetSize returns the visible dimensions of the given terminal. +// +// These dimensions don't include any scrollback buffer height. func GetSize(fd int) (width, height int, err error) { var info windows.ConsoleScreenBufferInfo if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { return 0, 0, err } - return int(info.Size.X), int(info.Size.Y), nil + return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil } // ReadPassword reads a line of input from a terminal without local echo. 
This diff --git a/cluster-autoscaler/vendor/golang.org/x/net/bpf/vm_instructions.go b/cluster-autoscaler/vendor/golang.org/x/net/bpf/vm_instructions.go index f0d2e55bdc6..cf8947c3327 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -129,7 +129,8 @@ func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { offset := int(ins.Off) - if !inBounds(len(in), offset, 0) { + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { return 0, false } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/parse.go b/cluster-autoscaler/vendor/golang.org/x/net/html/parse.go index 488e8d3cd63..992cff2a33a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/parse.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/parse.go @@ -439,9 +439,6 @@ func (p *parser) resetInsertionMode() { case a.Select: if !last { for ancestor, first := n, p.oe[0]; ancestor != first; { - if ancestor == first { - break - } ancestor = p.oe[p.oe.index(ancestor)-1] switch ancestor.DataAtom { case a.Template: @@ -633,7 +630,16 @@ func inHeadIM(p *parser) bool { p.oe.pop() p.acknowledgeSelfClosingTag() return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + case a.Noscript: + p.addElement() + if p.scripting { + p.setOriginalIM() + p.im = textIM + } else { + p.im = inHeadNoscriptIM + } + return true + case a.Script, a.Title, a.Noframes, a.Style: p.addElement() p.setOriginalIM() p.im = textIM @@ -695,6 +701,49 @@ func inHeadIM(p *parser) bool { return false } +// 12.2.6.4.5. +func inHeadNoscriptIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: + return inHeadIM(p) + case a.Head, a.Noscript: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Noscript, a.Br: + default: + // Ignore the token. + return true + } + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) == 0 { + // It was all whitespace. + return inHeadIM(p) + } + case CommentToken: + return inHeadIM(p) + } + p.oe.pop() + if p.top().DataAtom != a.Head { + panic("html: the new current node will be a head element.") + } + p.im = inHeadIM + if p.tok.DataAtom == a.Noscript { + return true + } + return false +} + // Section 12.2.6.4.6. 
func afterHeadIM(p *parser) bool { switch p.tok.Type { @@ -904,7 +953,7 @@ func inBodyIM(p *parser) bool { case a.A: for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- { if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A { - p.inBodyEndTagFormatting(a.A) + p.inBodyEndTagFormatting(a.A, "a") p.oe.remove(n) p.afe.remove(n) break @@ -918,7 +967,7 @@ func inBodyIM(p *parser) bool { case a.Nobr: p.reconstructActiveFormattingElements() if p.elementInScope(defaultScope, a.Nobr) { - p.inBodyEndTagFormatting(a.Nobr) + p.inBodyEndTagFormatting(a.Nobr, "nobr") p.reconstructActiveFormattingElements() } p.addFormattingElement() @@ -1126,7 +1175,7 @@ func inBodyIM(p *parser) bool { case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6) case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U: - p.inBodyEndTagFormatting(p.tok.DataAtom) + p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data) case a.Applet, a.Marquee, a.Object: if p.popUntil(defaultScope, p.tok.DataAtom) { p.clearActiveFormattingElements() @@ -1137,7 +1186,7 @@ func inBodyIM(p *parser) bool { case a.Template: return inHeadIM(p) default: - p.inBodyEndTagOther(p.tok.DataAtom) + p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data) } case CommentToken: p.addChild(&Node{ @@ -1164,7 +1213,7 @@ func inBodyIM(p *parser) bool { return true } -func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { +func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { // This is the "adoption agency" algorithm, described at // https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency @@ -1186,7 +1235,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { } } if formattingElement == nil { - p.inBodyEndTagOther(tagAtom) + p.inBodyEndTagOther(tagAtom, tagName) return } feIndex := p.oe.index(formattingElement) @@ -1291,9 +1340,17 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { // inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM. // "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content // https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign -func (p *parser) inBodyEndTagOther(tagAtom a.Atom) { +func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) { for i := len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == tagAtom { + // Two element nodes have the same tag if they have the same Data (a + // string-typed field). As an optimization, for common HTML tags, each + // Data string is assigned a unique, non-zero DataAtom (a uint32-typed + // field), since integer comparison is faster than string comparison. + // Uncommon (custom) tags get a zero DataAtom. + // + // The if condition here is equivalent to (p.oe[i].Data == tagName). + if (p.oe[i].DataAtom == tagAtom) && + ((tagAtom != 0) || (p.oe[i].Data == tagName)) { p.oe = p.oe[:i] break } @@ -1687,8 +1744,9 @@ func inCellIM(p *parser) bool { return true } // Close the cell and reprocess. - p.popUntil(tableScope, a.Td, a.Th) - p.clearActiveFormattingElements() + if p.popUntil(tableScope, a.Td, a.Th) { + p.clearActiveFormattingElements() + } p.im = inRowIM return false } @@ -2242,6 +2300,33 @@ func (p *parser) parse() error { // // The input is assumed to be UTF-8 encoded. func Parse(r io.Reader) (*Node, error) { + return ParseWithOptions(r) +} + +// ParseFragment parses a fragment of HTML and returns the nodes that were +// found. 
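The scripting flag that selects the in-head-noscript path above is toggled through the option API introduced further down in this hunk (ParseOption, ParseOptionEnableScripting, ParseWithOptions); a rough sketch of the two modes, with a made-up input document:

package main

import (
	"log"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<head><noscript><p>enable JS</p></noscript></head>`

	// Default: scripting is enabled and <noscript> content stays raw text.
	withScripting, err := html.Parse(strings.NewReader(page))
	if err != nil {
		log.Fatal(err)
	}

	// Scripting disabled: the parser walks the in-head-noscript insertion mode
	// and the <p> element becomes a real node.
	withoutScripting, err := html.ParseWithOptions(strings.NewReader(page),
		html.ParseOptionEnableScripting(false))
	if err != nil {
		log.Fatal(err)
	}
	_, _ = withScripting, withoutScripting
}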
If the fragment is the InnerHTML for an existing element, pass that +// element in context. +// +// It has the same intricacies as Parse. +func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { + return ParseFragmentWithOptions(r, context) +} + +// ParseOption configures a parser. +type ParseOption func(p *parser) + +// ParseOptionEnableScripting configures the scripting flag. +// https://html.spec.whatwg.org/multipage/webappapis.html#enabling-and-disabling-scripting +// +// By default, scripting is enabled. +func ParseOptionEnableScripting(enable bool) ParseOption { + return func(p *parser) { + p.scripting = enable + } +} + +// ParseWithOptions is like Parse, with options. +func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) { p := &parser{ tokenizer: NewTokenizer(r), doc: &Node{ @@ -2251,6 +2336,11 @@ func Parse(r io.Reader) (*Node, error) { framesetOK: true, im: initialIM, } + + for _, f := range opts { + f(p) + } + err := p.parse() if err != nil { return nil, err @@ -2258,12 +2348,8 @@ func Parse(r io.Reader) (*Node, error) { return p.doc, nil } -// ParseFragment parses a fragment of HTML and returns the nodes that were -// found. If the fragment is the InnerHTML for an existing element, pass that -// element in context. -// -// It has the same intricacies as Parse. -func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { +// ParseFragmentWithOptions is like ParseFragment, with options. +func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) { contextTag := "" if context != nil { if context.Type != ElementNode { @@ -2287,6 +2373,10 @@ func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { context: context, } + for _, f := range opts { + f(p) + } + root := &Node{ Type: ElementNode, DataAtom: a.Html, diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/frame.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/frame.go index b46791d1da2..514c126c5f8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/frame.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/frame.go @@ -643,7 +643,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { return f.WriteDataPadded(streamID, endStream, data, nil) } -// WriteData writes a DATA frame with optional padding. +// WriteDataPadded writes a DATA frame with optional padding. // // If pad is nil, the padding bit is not sent. // The length of pad must not exceed 255 bytes. diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go index 6f8e8b0d98b..5e01ce9ab01 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go @@ -283,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, Handler: h, BaseConfig: hs, }) @@ -294,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { // ServeConnOpts are options for the Server.ServeConn method. 
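A sketch of how a caller can take advantage of the Context plumbing added in this hunk (the field appears in the struct just below); the key type and values are hypothetical.

package h2sketch

import (
	"context"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

type ctxKey string

// serveH2 hands the connection to the http2 server with an explicit base
// context; every request context on this connection derives from it.
func serveH2(c net.Conn, hs *http.Server, h http.Handler) {
	ctx := context.WithValue(context.Background(), ctxKey("component"), "ingress")

	var s http2.Server
	s.ServeConn(c, &http2.ServeConnOpts{
		Context:    ctx, // nil falls back to context.Background
		BaseConfig: hs,
		Handler:    h,
	})
}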
type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. BaseConfig *http.Server @@ -304,6 +321,13 @@ type ServeConnOpts struct { Handler http.Handler } +func (o *ServeConnOpts) context() context.Context { + if o.Context != nil { + return o.Context + } + return context.Background() +} + func (o *ServeConnOpts) baseConfig() *http.Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig @@ -449,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(opts.context()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, http.ServerContextKey, hs) @@ -2337,7 +2361,16 @@ type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } + +func (rws *responseWriterState) hasNonemptyTrailers() bool { + for _, trailer := range rws.trailers { + if _, ok := rws.handlerHeader[trailer]; ok { + return true + } + } + return false +} // declareTrailer is called for each Trailer header when the // response header is written. It notes that a header will need to be @@ -2437,7 +2470,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.promoteUndeclaredTrailers() } - endStream := rws.handlerDone && !rws.hasTrailers() + // only send trailers if they have actually been defined by the + // server handler. + hasNonemptyTrailers := rws.hasNonemptyTrailers() + endStream := rws.handlerDone && !hasNonemptyTrailers if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { @@ -2446,7 +2482,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { } } - if rws.handlerDone && rws.hasTrailers() { + if rws.handlerDone && hasNonemptyTrailers { err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, h: rws.handlerHeader, diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go index f272e8f9fac..aeac7d8a51a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go @@ -28,6 +28,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "golang.org/x/net/http/httpguts" @@ -199,6 +200,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: @@ -440,7 +442,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - traceGotConn(req, cc) + reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + traceGotConn(req, cc, reused) res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) if err != nil && retry <= 6 { if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { @@ -989,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: @@ -1411,7 +1414,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // followed by the query production (see Sections 3.3 and 3.4 of // [RFC3986]). f(":authority", host) - f(":method", req.Method) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) if req.Method != "CONNECT" { f(":path", path) f(":scheme", req.URL.Scheme) @@ -2555,15 +2562,15 @@ func traceGetConn(req *http.Request, hostPort string) { trace.GetConn(hostPort) } -func traceGotConn(req *http.Request, cc *ClientConn) { +func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { return } ci := httptrace.GotConnInfo{Conn: cc.tconn} + ci.Reused = reused cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused + ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { ci.IdleTime = time.Now().Sub(cc.lastActive) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/writesched_random.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/writesched_random.go index 36d7919f16a..9a7b9e581c1 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/writesched_random.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/writesched_random.go @@ -19,7 +19,8 @@ type randomWriteScheduler struct { zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. 
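The writeChunk change above means a trailer HEADERS frame is only emitted when the handler actually populated a declared trailer. A small handler sketch of the pattern it now keys on; the header name and listen address are invented for illustration, and plain TCP is used for brevity (real HTTP/2 would additionally need TLS or h2c):

package main

import (
	"fmt"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Declare the trailer before the body is written...
	w.Header().Set("Trailer", "X-Body-Bytes")
	n, _ := fmt.Fprintln(w, "hello")
	// ...and fill it in afterwards. Because the value is then present in the
	// handler's header map, hasNonemptyTrailers reports true and the trailer
	// frame is sent; a declared-but-never-set trailer is now skipped.
	w.Header().Set("X-Body-Bytes", fmt.Sprint(n))
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}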
- // When a stream is idle or closed, it's deleted from the map. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. @@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. - for _, q := range ws.sq { + for streamID, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } return wr, true } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna10.0.0.go similarity index 99% rename from cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go rename to cluster-autoscaler/vendor/golang.org/x/net/idna/idna10.0.0.go index 346fe4423ed..a98a31f4038 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -4,14 +4,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build go1.10 + // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to // deal with the transition from IDNA2003. // // IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC // 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. -// UTS #46 is defined in http://www.unicode.org/reports/tr46. -// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the // differences between these two standards. package idna // import "golang.org/x/net/idna" @@ -297,7 +299,7 @@ func (e runeError) Error() string { } // process implements the algorithm described in section 4 of UTS #46, -// see http://www.unicode.org/reports/tr46. +// see https://www.unicode.org/reports/tr46. func (p *Profile) process(s string, toASCII bool) (string, error) { var err error var isBidi bool diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/idna9.0.0.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna9.0.0.go new file mode 100644 index 00000000000..8842146b5d9 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -0,0 +1,682 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.10 + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. 
+package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissable ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. 
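The StrictDomainName option documented above is the knob to relax when characters outside the STD3 set must be accepted. A short sketch, assuming the vendored golang.org/x/net/idna package and the MapForLookup option defined further below (later options override earlier ones, per New's documentation); the printed results are expectations, not guarantees:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	strict := idna.New(idna.MapForLookup())
	relaxed := idna.New(idna.MapForLookup(), idna.StrictDomainName(false))

	// MapForLookup enables the STD3 rules, so '_' (U+005F) is rejected...
	fmt.Println(strict.ToASCII("_sip._tcp.example.com"))
	// ...while disabling StrictDomainName lets it pass through unchanged.
	fmt.Println(relaxed.ToASCII("_sip._tcp.example.com"))
}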
+func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + RemoveLeadingDots(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (string, error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of a IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. 
+func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. 
+ if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) + } + i += sz + } + return s, nil +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. 
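validateAndMap and validateRegistration above are what give the lookup and registration profiles their different tolerance for input that needs mapping. A short sketch of the visible difference, assuming the vendored golang.org/x/net/idna package; the outcomes are expectations based on the code above:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup maps the input first (e.g. lowercasing the 'B'), so this succeeds.
	fmt.Println(idna.Lookup.ToASCII("Bücher.example"))
	// Registration refuses anything that would need mapping: the uppercase
	// 'B' falls in the "mapped" category and is reported as a disallowed rune.
	fmt.Println(idna.Registration.ToASCII("Bücher.example"))
}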
+type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. 
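The joinStates table above drives the CONTEXTJ check in validateLabel: zero-width joiners are only accepted in specific joining contexts. A small sketch of the effect, using the non-transitional Display profile defined earlier; the label is invented and the expectation follows from reading the state machine above, so treat it as illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// U+200C ZERO WIDTH NON-JOINER between two Latin letters has no virama
	// or joining context before it, so validateLabel is expected to reject
	// the label with the "C" (CONTEXTJ) error.
	fmt.Println(idna.Display.ToASCII("a\u200cb.example"))
}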
+func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables10.0.0.go similarity index 99% rename from cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go rename to cluster-autoscaler/vendor/golang.org/x/net/idna/tables10.0.0.go index f910b269144..54fddb4b16c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,11 +1,13 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +// +build go1.10,!go1.13 + package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. const UnicodeVersion = "10.0.0" -var mappings string = "" + // Size: 8176 bytes +var mappings string = "" + // Size: 8175 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + @@ -4554,4 +4556,4 @@ var idnaSparseValues = [1915]valueRange{ {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E +// Total table size 42114 bytes (41KiB); checksum: 355A58A4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables11.0.0.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables11.0.0.go new file mode 100644 index 00000000000..c515d7ad2a2 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -0,0 +1,4653 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "11.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 
0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, + 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, + 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 
0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168, + 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 
0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179, + 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d, + 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184, + 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, 
+ // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 
0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 276 entries, 552 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 
0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca} + +// idnaSparseValues: 1997 entries, 7988 bytes +var idnaSparseValues = [1997]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 
0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x86 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 
0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, 
+ {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 
0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, 
+ // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, 
+ {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x224 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22e + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23a + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, 
+ {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x246 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x252 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25f + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x269 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x27a + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x289 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28d + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, 
offset 0x296 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29e + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a4 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ac + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2ba + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2be + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xbf}, + // Block 0x53, offset 0x2c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x54, offset 0x2c7 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2cd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2dc + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2e7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 
0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f5 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0xbf}, + // Block 0x5b, offset 0x2f8 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x2fe + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x302 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x304 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x307 + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x309 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30c + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x316 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x319 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x328 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32c + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x331 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x334 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x338 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, 
hi: 0xbf}, + // Block 0x69, offset 0x33d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x342 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x348 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x34e + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x367 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x376 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x383 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x38d + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + 
{value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a0 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b1 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3ba + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3ca + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3d7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e1 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e6 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + 
{value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3fc + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x3fe + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x402 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x404 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x408 + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x411 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x417 + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41b + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42b + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x435 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, 
offset 0x43d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x443 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x44f + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x453 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x459 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x45e + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x467 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46c + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x472 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x479 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x480 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x487 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x490 + {value: 0x0000, lo: 
0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x493 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x498 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a4 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4aa + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4af + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4be + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c3 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4c7 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4d7 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4de + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 
0xbf}, + // Block 0xa4, offset 0x4e6 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4ed + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4ef + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4f9 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xaa, offset 0x502 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xac, offset 0x50e + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xad, offset 0x516 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xae, offset 0x51c + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xaf, offset 0x525 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb0, offset 0x531 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb1, offset 0x538 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, 
hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb2, offset 0x541 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb3, offset 0x54b + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb4, offset 0x552 + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb5, offset 0x560 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb6, offset 0x56d + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb7, offset 0x57a + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb8, offset 0x583 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb9, offset 0x587 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, 
hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xba, offset 0x596 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbb, offset 0x59e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbc, offset 0x5a9 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5b2 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbe, offset 0x5b8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5c0 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc1, offset 0x5d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc2, offset 0x5d6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5e2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, 
hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc4, offset 0x5eb + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc5, offset 0x5ee + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f3 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x5fe + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc8, offset 0x607 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc9, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xca, offset 0x616 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xcb, offset 0x620 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xcc, offset 0x629 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xcd, offset 0x635 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, 
hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xce, offset 0x642 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcf, offset 0x64f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd0, offset 0x65d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd1, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xd2, offset 0x667 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xd3, offset 0x66c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xd4, offset 0x66f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xd5, offset 0x672 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd6, offset 0x675 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd7, offset 0x67c + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd8, offset 0x683 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd9, offset 0x687 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, 
hi: 0xbf}, + // Block 0xda, offset 0x692 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xdb, offset 0x695 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x698 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xdd, offset 0x69b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x6a1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xdf, offset 0x6a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xe0, offset 0x6aa + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x6ad + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe2, offset 0x6b0 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe3, offset 0x6b3 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6b6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xe5, offset 0x6b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xe6, offset 0x6be + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xe7, offset 0x6c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe9, offset 0x6cf + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xea, offset 0x6de + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + 
{value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xeb, offset 0x6ea + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xec, offset 0x6ee + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xed, offset 0x6f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xef, offset 0x6fc + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf0, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x705 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xf2, offset 0x70e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xf3, offset 0x719 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf4, offset 0x71f + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xf5, offset 0x727 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0xf6, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf7, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xf8, offset 0x731 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xf9, offset 0x735 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, 
lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xfa, offset 0x73b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xfc, offset 0x746 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xfd, offset 0x749 + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xfe, offset 0x759 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xff, offset 0x760 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x100, offset 0x763 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0xbf}, + // Block 0x101, offset 0x766 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x102, offset 0x76a + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x103, offset 0x770 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x104, offset 0x775 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x105, offset 0x77a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x782 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x107, offset 0x787 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // 
Block 0x108, offset 0x78b + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x78f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10a, offset 0x792 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x10b, offset 0x795 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x10c, offset 0x799 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x10d, offset 0x79d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x10e, offset 0x7a0 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x10f, offset 0x7b0 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x110, offset 0x7c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x111, offset 0x7c6 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x112, offset 0x7c8 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x7ca + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42466 bytes (41KiB); checksum: 355A58A4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables9.0.0.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables9.0.0.go new file mode 100644 index 00000000000..8b65fa16783 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -0,0 +1,4486 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 124: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 124 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 126 blocks, 8064 entries, 16128 bytes +// The third block is the zero block. 
+var idnaValues = [8064]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308, + 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008, + 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308, + 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308, + 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1, + 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308, + 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008, + 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008, + // Block 0x16, offset 0x580 + 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008, + 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008, + 
0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008, + 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008, + 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008, + 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040, + 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040, + 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008, + 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1, + 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018, + 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018, + 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040, + // Block 0x18, offset 0x600 + 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040, + 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008, + 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308, + 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040, + 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040, + 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308, + 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008, + 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008, + 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040, + 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040, + 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040, + 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 
0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9, + 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018, + 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040, + 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040, + 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040, + 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308, + 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040, + 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040, + 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018, + 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018, + // Block 0x20, offset 0x800 + 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008, + 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308, + // Block 0x21, offset 0x840 + 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040, + 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040, + 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040, + 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008, + 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018, + 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018, + 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008, + 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040, + 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040, + 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008, + 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308, + 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308, + 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x0a09, 
0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040, + 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59, + 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008, + // Block 0x25, offset 0x940 + 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018, + 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308, + 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308, + 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11, + 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308, + 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308, + 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308, + 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008, + 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41, + 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008, + 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1, + 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011, + 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041, + 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9, + 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099, + 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269, + 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1, + 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 
0x0008, + 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008, + 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008, + 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169, + 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9, + 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251, + 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9, + 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359, + 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1, + 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008, + 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008, + 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008, + 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008, + 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045, + 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 
0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008, + 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045, + 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008, + 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045, + 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045, + 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489, + 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1, + 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040, + // Block 0x2c, offset 0xb00 + 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1, + 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591, + 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1, + 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1, + 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771, + 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891, + 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831, + 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951, + 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459, + 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040, + 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489, + 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2, + 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa, + 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9, + 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a, + 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0, + 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018, + 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a, + 0xbb0: 
0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018, + 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018, + 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018, + 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018, + 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340, + 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340, + 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61, + 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd, + 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71, + // Block 0x30, offset 0xc00 + 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61, + 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5, + 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09, + 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359, + 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040, + 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018, + 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018, + 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040, + // Block 0x31, offset 0xc40 + 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e, + 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249, + 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41, + 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018, + 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269, + 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018, + 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018, + 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09, + 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9, + 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd, + 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109, + // Block 0x32, offset 0xc80 + 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9, + 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018, + 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 
0xc90: 0x2121, 0xc91: 0x2151, + 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279, + 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399, + 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439, + 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369, + 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61, + 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451, + 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5, + 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018, + 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040, + 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51, + 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601, + 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691, + 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26, + 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6, + 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a, + // Block 0x34, offset 0xd00 + 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a, + 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46, + 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06, + 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6, + 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86, + 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46, + 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199, + 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259, + // Block 0x35, offset 0xd40 + 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99, + 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089, + 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9, + 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249, + 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71, + 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9, + 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1, + 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018, + 0xd70: 0x0018, 0xd71: 
0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018, + 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018, + 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018, + // Block 0x36, offset 0xd80 + 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008, + 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008, + 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008, + 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008, + 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008, + 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd, + 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d, + 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9, + 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d, + 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008, + 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9, + // Block 0x37, offset 0xdc0 + 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008, + 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008, + 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008, + 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008, + 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008, + 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008, + 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018, + 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308, + 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d, + 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d, + 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d, + 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040, + 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040, + 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040, + 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040, + 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040, + 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040, + 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008, + 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018, + 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 
0xe51: 0x0018, + 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018, + 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018, + 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018, + 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018, + 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018, + 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018, + 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd, + 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd, + 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d, + 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d, + 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d, + 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd, + 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d, + 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd, + 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d, + 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd, + 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd, + 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d, + 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd, + 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d, + 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008, + 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008, + 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008, + 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008, + 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040, + 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd, + 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018, + 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761, + 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1, + 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881, + 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd, + 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d, + 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d, + 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 
0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd, + 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d, + 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d, + 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d, + 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd, + 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd, + 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d, + 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d, + 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd, + 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d, + 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999, + 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29, + 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69, + 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69, + 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15, + 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75, + 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded, + 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d, + 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5, + 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d, + 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d, + 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd, + 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9, + 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1, + 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9, + 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549, + 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1, + 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11, + 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91, + 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9, + 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011, + 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209, + 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361, + // Block 0x40, offset 0x1000 + 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541, + 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781, + 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 
0x1011: 0x5979, + 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89, + 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1, + 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99, + 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9, + 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9, + 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069, + 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9, + 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9, + // Block 0x41, offset 0x1040 + 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271, + 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9, + 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed, + 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371, + 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9, + 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d, + 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211, + 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1, + 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599, + 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9, + 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611, + // Block 0x42, offset 0x1080 + 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671, + 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709, + 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781, + 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1, + 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811, + 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901, + 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1, + 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11, + 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31, + 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51, + 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 
0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 
0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040, + 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040, + 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040, + // Block 0x48, offset 0x1200 + 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575, + 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635, + 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008, + 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715, + 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5, + 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008, + 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008, + 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935, + 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5, + 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5, + 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35, + // Block 0x49, offset 0x1240 + 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35, + 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5, + 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19, + 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91, + 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040, + 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040, + 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040, + 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040, + 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040, + 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040, + 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001, + 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 
0x0040, 0x128a: 0x0040, 0x128b: 0x0040, + 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040, + 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9, + 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1, + 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149, + 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2, + 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1, + 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1, + 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479, + 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040, + 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659, + 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721, + 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751, + 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769, + 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799, + 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1, + 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1, + 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9, + 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829, + 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871, + 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9, + 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9, + 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919, + 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931, + 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961, + 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991, + 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1, + 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818, + 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818, + 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818, + // Block 0x4d, offset 0x1340 + 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040, + 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040, + 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040, + 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09, + 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 
0x135d: 0x0479, + 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81, + 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1, + 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19, + 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91, + 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1, + 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09, + // Block 0x4e, offset 0x1380 + 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1, + 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1, + 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1, + 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991, + 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81, + 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a, + 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99, + 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89, + 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79, + 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19, + 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649, + 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9, + 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49, + 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21, + 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9, + 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01, + 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91, + 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9, + 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171, + 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289, + 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329, + // Block 0x50, offset 0x1400 + 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1, + 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621, + 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739, + 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1, + 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9, + 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29, + 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079, + 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1, + 0x1430: 
0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171, + 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261, + 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301, + // Block 0x51, offset 0x1440 + 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1, + 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1, + 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171, + 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261, + 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351, + 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441, + 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509, + 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1, + 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081, + 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239, + 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018, + // Block 0x52, offset 0x1480 + 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040, + 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609, + 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721, + 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839, + 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919, + 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9, + 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9, + 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9, + 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1, + 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79, + // Block 0x53, offset 0x14c0 + 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989, + 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040, + 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040, + 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040, + 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040, + 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040, + 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9, + 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12, + 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040, + // Block 0x54, offset 0x1500 + 0x1500: 0x33c0, 0x1501: 0x33c0, 
0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0, + 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0, + 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55, + 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75, + 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308, + 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308, + 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308, + 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2, + 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35, + 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55, + // Block 0x55, offset 0x1540 + 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018, + 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56, + 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95, + 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa, + 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95, + 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99, + 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda, + 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040, + 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040, + 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081, + 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141, + 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171, + 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1, + 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1, + 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201, + 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219, + 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249, + 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291, + 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1, + 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9, + 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321, + 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339, + 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369, + 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 
0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381, + 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1, + 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9, + 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9, + 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1, + 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441, + 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9, + 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0, + // Block 0x58, offset 0x1600 + 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea, + 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2, + 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9, + 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81, + 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2, + 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159, + 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41, + 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9, + 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9, + 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a, + 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a, + // Block 0x59, offset 0x1640 + 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09, + 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51, + 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039, + 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279, + 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a, + 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115, + 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5, + 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295, + 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355, + 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415, + 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215, + // Block 0x5a, offset 0x1680 + 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515, + 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595, + 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5, + 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655, + 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115, + 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735, + 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 
0x16a9: 0x87b5, + 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5, + 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5, + 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5, + 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5, + 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715, + 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040, + 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935, + 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040, + 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6, + 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35, + 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040, + 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040, + 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08, + 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808, + 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08, + 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908, + 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08, + 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808, + 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040, + 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18, + 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818, + 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08, + 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08, + 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08, + 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040, + 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040, + 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18, + 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818, + 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040, + 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008, + 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008, + 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040, + 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008, + 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008, + 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008, + 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308, + 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040, + 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040, + 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199, + 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359, + 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269, + 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369, + 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9, + 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259, + 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99, + 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089, + 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9, + 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249, + 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359, + // Block 0x61, offset 0x1840 + 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269, + 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369, + 0x184c: 0x0289, 0x184d: 0x13d1, 
0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9, + 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259, + 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99, + 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089, + 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9, + 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249, + 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71, + 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9, + 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369, + // Block 0x62, offset 0x1880 + 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9, + 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259, + 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99, + 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089, + 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040, + 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71, + 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9, + 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1, + 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199, + 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99, + 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089, + 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9, + 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249, + 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71, + 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9, + 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1, + 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199, + 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359, + 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269, + 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089, + // Block 0x64, offset 0x1900 + 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9, + 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71, + 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9, + 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040, + 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 
0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199, + 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359, + 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269, + 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369, + 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9, + 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040, + 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9, + 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040, + 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199, + 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359, + 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269, + 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369, + 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9, + 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259, + 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99, + 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9, + // Block 0x66, offset 0x1980 + 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1, + 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199, + 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359, + 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269, + 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369, + 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9, + 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259, + 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99, + 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089, + 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9, + 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359, + 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269, + 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369, + 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9, + 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259, + 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99, + 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089, + 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9, + 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 
0x19f5: 0x0249, + 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71, + 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369, + 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9, + 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259, + 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99, + 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089, + 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9, + 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249, + 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71, + 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9, + 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1, + 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259, + 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99, + 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089, + 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9, + 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249, + 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71, + 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9, + 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1, + 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199, + 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359, + 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089, + 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9, + 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249, + 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71, + 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9, + 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1, + 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099, + 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429, + 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71, + 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9, + 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9, + 0x1ac6: 
0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11, + 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109, + 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1, + 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429, + 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099, + 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429, + 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71, + 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9, + 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01, + 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11, + 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109, + 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1, + 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429, + 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099, + 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429, + 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71, + 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9, + 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01, + 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1, + 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109, + 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1, + 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429, + 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099, + 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429, + 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71, + 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9, + 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01, + 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1, + 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41, + 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1, + 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429, + 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099, + 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429, + 0x1b98: 0x1a31, 0x1b99: 0xbb11, 
0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71, + 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9, + 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01, + 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1, + 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41, + 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1, + 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429, + 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079, + 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1, + 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61, + 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9, + 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81, + 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079, + 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1, + 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61, + 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411, + 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1, + 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231, + 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949, + 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231, + 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949, + 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411, + 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231, + 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249, + 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02, + 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2, + 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72, + 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32, + 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2, + 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2, + 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040, + 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199, + 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359, + 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089, + 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1, + 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289, + 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349, + 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409, + 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9, + 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589, + 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649, + 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709, + 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79, + 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39, + 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9, + 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39, + 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9, + 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79, + 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39, + 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9, + 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059, + 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9, + 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239, + 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9, + 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399, + 0x1e12: 
0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459, + 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309, + 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559, + 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9, + 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679, + 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9, + 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d, + 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9, + 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959, + 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d, + 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d, + 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9, + 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99, + 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9, + 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9, + 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99, + 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39, + 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639, + 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9, + 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d, + 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9, + 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d, + 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd, + 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979, + 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19, + 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d, + 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d, + 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99, + 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39, + 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9, + 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39, + 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd, + 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19, + 0x1ee4: 0xee39, 0x1ee5: 0x925d, 
0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9, + 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59, + 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd, + 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d, + 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d, + 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d, + 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879, + 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919, + 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd, + 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9, + 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99, + 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39, + 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9, + 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d, + 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19, + 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9, + 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59, + 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9, + 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d, + 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040, + 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040, + 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040, + 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040, + 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3, + 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b, + // Block 0x6, offset 0x180 + 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca, + 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1, + 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6, + 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2, + 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 
0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9, + 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1, + 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 
0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b, + 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51, + 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f, + 0x408: 0x130, 0x409: 0xba, 
0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138, + 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 
0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba, + 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a, + 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 
0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168, + 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c, + 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 
0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba, + // Block 0x1d, offset 0x740 + 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba, + 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba, + 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba, + 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba, + 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a, + 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d, + 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba, + 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba, + 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 
0xba, + 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba, + 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba, + 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba, + 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 258 entries, 516 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a} + +// idnaSparseValues: 1869 entries, 7476 bytes +var idnaSparseValues = [1869]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 
0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 
0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0c08, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x85}, + {value: 0x0c08, lo: 0x86, hi: 0x87}, + {value: 0x0a08, lo: 0x88, hi: 0x88}, + {value: 0x0c08, lo: 0x89, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0x93}, + {value: 0x0c08, lo: 0x94, hi: 0x94}, + {value: 0x0a08, lo: 0x95, hi: 0x95}, + {value: 0x0808, lo: 0x96, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x93 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0x10, offset 0x98 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa1 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + 
{value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xd8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x16, offset 0xe9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x17, offset 0xf3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x18, offset 0xfa + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xbf}, + // Block 0x19, offset 0x107 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x1a, offset 0x118 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1e, offset 0x147 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1f, offset 0x151 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x20, offset 0x153 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x21, offset 0x158 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x22, offset 0x15b + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, 
hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x23, offset 0x15e + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x24, offset 0x160 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x25, offset 0x16c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x26, offset 0x177 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x17f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x28, offset 0x185 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x29, offset 0x18b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2a, offset 0x190 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2b, offset 0x195 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2c, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2d, offset 0x19c + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2e, offset 0x1a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2f, offset 0x1a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + 
{value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x30, offset 0x1b3 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x31, offset 0x1bd + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x32, offset 0x1c3 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x33, offset 0x1d4 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x34, offset 0x1de + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x35, offset 0x1e1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x36, offset 0x1e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x37, offset 0x1ec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x38, offset 0x1f9 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x39, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x3a, offset 0x205 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 
0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3b, offset 0x20c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x214 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x224 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x230 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3f, offset 0x232 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23c + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x41, offset 0x248 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x42, offset 0x254 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 
0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x43, offset 0x260 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x44, offset 0x268 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x46, offset 0x277 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x47, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x48, offset 0x28c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x49, offset 0x297 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x4a, offset 0x29b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4b, offset 0x2a4 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4c, offset 0x2ac + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x2b2 + {value: 0x0000, lo: 0x04}, + 
{value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4e, offset 0x2b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4f, offset 0x2ba + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x50, offset 0x2bd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x51, offset 0x2c1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x52, offset 0x2c7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x53, offset 0x2cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x54, offset 0x2cf + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2dc + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2e2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x5a, offset 0x2fc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x306 + {value: 0x0000, lo: 0x03}, 
+ {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5c, offset 0x30a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5d, offset 0x30d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5e, offset 0x313 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5f, offset 0x317 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x319 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x61, offset 0x31c + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x62, offset 0x31e + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x63, offset 0x321 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x32e + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x66, offset 0x33d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x341 + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x68, offset 0x346 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x69, offset 0x349 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x6a, offset 0x34d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x6b, offset 0x352 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6c, offset 0x357 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 
0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6f, offset 0x372 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x70, offset 0x378 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x71, offset 0x37c + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x38b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x75, offset 0x3a2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x3ad + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 
0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3b5 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3c6 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x79, offset 0x3cf + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x7a, offset 0x3df + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ec + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 
0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7e, offset 0x408 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7f, offset 0x40c + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x83, offset 0x419 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x84, offset 0x41d + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x85, offset 0x426 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x86, offset 0x42c + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x430 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x440 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x89, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x8a, offset 0x44f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x452 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8c, offset 0x458 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 
0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8d, offset 0x45f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8e, offset 0x464 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 0x468 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x90, offset 0x46e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x91, offset 0x473 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x92, offset 0x47c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x93, offset 0x481 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x94, offset 0x487 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x95, offset 0x48e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x96, offset 0x495 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x49c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x4a5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x9a, offset 0x4a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, 
lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x4b9 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9d, offset 0x4bf + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x4c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9f, offset 0x4cb + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x4d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0xa1, offset 0x4d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa2, offset 0x4dc + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa3, offset 0x4ec + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa4, offset 0x4f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x4f7 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa6, offset 0x4fb + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa7, offset 0x502 + {value: 0x0000, 
lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa9, offset 0x507 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xaa, offset 0x50a + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xab, offset 0x50e + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x512 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x518 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xae, offset 0x521 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x52d + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb0, offset 0x534 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb1, offset 0x53d + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb2, offset 0x545 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x54c + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb4, offset 0x55a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x567 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb6, offset 0x574 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb8, offset 0x581 + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb9, offset 0x58f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x597 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbb, offset 0x5a2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5ab + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 
0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbd, offset 0x5b1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5b9 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbf, offset 0x5c2 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc0, offset 0x5cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc1, offset 0x5cf + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc2, offset 0x5db + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5e3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc5, offset 0x5e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f0 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc7, offset 0x5f9 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, 
hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc8, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc9, offset 0x608 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xca, offset 0x60d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xcb, offset 0x610 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcc, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcd, offset 0x616 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xce, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcf, offset 0x624 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd0, offset 0x628 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd1, offset 0x633 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd2, offset 0x636 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x63c + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd4, offset 0x641 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd5, offset 0x645 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd6, offset 0x648 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd7, offset 0x64b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd8, offset 0x64e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd9, offset 0x653 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 
0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xda, offset 0x65d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xdb, offset 0x660 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xdc, offset 0x664 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xdd, offset 0x673 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x67f + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdf, offset 0x683 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe0, offset 0x688 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x68d + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x691 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe3, offset 0x696 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x69f + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe5, offset 0x6aa + {value: 
0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe6, offset 0x6b0 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xea, offset 0x6c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xeb, offset 0x6cc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xec, offset 0x6d1 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xed, offset 0x6d4 + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xee, offset 0x6e2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xef, offset 0x6e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf0, offset 0x6ec + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf1, offset 0x6ef + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf2, offset 0x6f3 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf3, offset 0x6f9 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf4, 
offset 0x6fe + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf5, offset 0x708 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf6, offset 0x70d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf7, offset 0x710 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf8, offset 0x713 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf9, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xfa, offset 0x719 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xfb, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfc, offset 0x720 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfd, offset 0x730 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xff, offset 0x746 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x100, offset 0x748 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x101, offset 0x74a + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41662 bytes (40KiB); checksum: 355A58A4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr.go 
b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr.go index 1eb07d26dee..0a73e277e09 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go index d1d0c2de54b..14dbb3ad42d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index 63f0534fa76..27be0efaca9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go index a4e71226f80..e581011b055 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_aix.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_aix.go new file mode 100644 index 00000000000..c9d05b26198 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_aix.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_darwin.go index 14e28c0b45c..b780bc67ab6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go index 14e28c0b45c..b780bc67ab6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_freebsd.go index 14e28c0b45c..b780bc67ab6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_freebsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_linux.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_linux.go index ce9ec2f6d72..6c5c11dcc46 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_linux.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -18,14 +18,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_netbsd.go index 3f84335699f..3d3b7763954 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_netbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - 
sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_openbsd.go index 14e28c0b45c..b780bc67ab6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_openbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_solaris.go index 14e28c0b45c..b780bc67ab6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -16,14 +16,6 @@ package socket */ import "C" -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - type iovec C.struct_iovec type msghdr C.struct_msghdr diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/error_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/error_unix.go index 93dff918028..f14872d3d35 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/error_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index afb34ad58eb..dfeda752be2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x -// +build darwin dragonfly freebsd linux netbsd openbsd +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build aix darwin dragonfly freebsd linux netbsd openbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_stub.go index c87d2a9339d..a746e90e307 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go index 2e80a9cb747..1a7f2792f25 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !linux,!netbsd +// +build !aix,!linux,!netbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 3c42ea7ad85..f1100683a57 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux netbsd +// +build aix linux netbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go index 5567afc88da..77f44c1f129 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index b8c87b72b94..c5562dd66ad 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd +// +build aix darwin dragonfly freebsd netbsd package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 610fc4f3bba..e731833a262 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_stub.go index 64e8173352a..873490a7ae9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn.go index d6871d55f72..b07b8900506 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - package socket import ( diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 499164a3fbf..1f4cb3b36e8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 // +build linux package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index b21d2e6418d..a9720118122 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go index f78832aa4a7..fe5bb942ba6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -2,17 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.9 // +build !linux package socket -import "errors" - func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go index 96733cbe1b9..b8cea6fe534 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -2,17 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package socket -import "errors" - func (c *Conn) recvMsg(m *Message, flags int) error { - return errors.New("not implemented") + return errNotImplemented } func (c *Conn) sendMsg(m *Message, flags int) error { - return errors.New("not implemented") + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_stub.go deleted file mode 100644 index d2add1a0aa9..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/rawconn_stub.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package socket - -import "errors" - -func (c *Conn) recvMsg(m *Message, flags int) error { - return errors.New("not implemented") -} - -func (c *Conn) sendMsg(m *Message, flags int) error { - return errors.New("not implemented") -} - -func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") -} - -func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/reflect.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/reflect.go deleted file mode 100644 index bb179f11d8c..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/reflect.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package socket - -import ( - "errors" - "net" - "os" - "reflect" - "runtime" -) - -// A Conn represents a raw connection. -type Conn struct { - c net.Conn -} - -// NewConn returns a new raw connection. 
-func NewConn(c net.Conn) (*Conn, error) { - return &Conn{c: c}, nil -} - -func (o *Option) get(c *Conn, b []byte) (int, error) { - s, err := socketOf(c.c) - if err != nil { - return 0, err - } - n, err := getsockopt(s, o.Level, o.Name, b) - return n, os.NewSyscallError("getsockopt", err) -} - -func (o *Option) set(c *Conn, b []byte) error { - s, err := socketOf(c.c) - if err != nil { - return err - } - return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) -} - -func socketOf(c net.Conn) (uintptr, error) { - switch c.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - v := reflect.ValueOf(c) - switch e := v.Elem(); e.Kind() { - case reflect.Struct: - fd := e.FieldByName("conn").FieldByName("fd") - switch e := fd.Elem(); e.Kind() { - case reflect.Struct: - sysfd := e.FieldByName("sysfd") - if runtime.GOOS == "windows" { - return uintptr(sysfd.Uint()), nil - } - return uintptr(sysfd.Int()), nil - } - } - } - return 0, errors.New("invalid type") -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/socket.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/socket.go index 5f9730e6d97..23571b8d4dc 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/socket.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/socket.go @@ -9,9 +9,12 @@ package socket // import "golang.org/x/net/internal/socket" import ( "errors" "net" + "runtime" "unsafe" ) +var errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + // An Option represents a sticky socket option. type Option struct { Level int // level diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys.go index 4f0eead138a..ee492ba86b1 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys.go @@ -29,5 +29,5 @@ func init() { } func roundup(l int) int { - return (l + kernelAlign - 1) & ^(kernelAlign - 1) + return (l + kernelAlign - 1) &^ (kernelAlign - 1) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsd.go index f13e14ff368..d432835b419 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -2,16 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd openbsd +// +build aix darwin dragonfly freebsd openbsd package socket -import "errors" - func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go index 164ddfce837..b4f41b5522f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build freebsd netbsd openbsd +// +build aix freebsd netbsd openbsd package socket @@ -12,9 +12,12 @@ import ( ) func probeProtocolStack() int { - if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" { + if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { return 8 } + if runtime.GOOS == "aix" { + return 1 + } var p uintptr return int(unsafe.Sizeof(p)) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_const_unix.go new file mode 100644 index 00000000000..43797d6e535 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "golang.org/x/sys/unix" + +const ( + sysAF_UNSPEC = unix.AF_UNSPEC + sysAF_INET = unix.AF_INET + sysAF_INET6 = unix.AF_INET6 + + sysSOCK_RAW = unix.SOCK_RAW +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linkname.go similarity index 98% rename from cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go rename to cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linkname.go index 0999a19fbdc..61c3f38a51b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linkname.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.12 +// +build aix go1.12,darwin package socket diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go new file mode 100644 index 00000000000..64f69f1dc55 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_posix.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_posix.go index 9a9bc47621e..22eae809c9e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package socket @@ -34,7 +33,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip4 := ip.To4(); ip4 != nil { b := make([]byte, sizeofSockaddrInet) switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) default: b[0] = sizeofSockaddrInet @@ -47,7 +46,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { b := make([]byte, sizeofSockaddrInet6) switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) default: b[0] = sizeofSockaddrInet6 @@ -69,7 +68,7 @@ func parseInetAddr(b []byte, network string) (net.Addr, error) { } var af int switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": af = int(NativeEndian.Uint16(b[:2])) default: af = int(b[1]) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_solaris.go index cced74e60d5..66b5547868c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -5,7 +5,6 @@ package socket import ( - "errors" "runtime" "syscall" "unsafe" @@ -63,9 +62,9 @@ func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_stub.go index d9f06d00e9b..0f617426281 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package socket import ( - "errors" "net" "runtime" "unsafe" @@ -36,29 +35,29 @@ func marshalInetAddr(ip net.IP, port int, zone string) []byte { } func parseInetAddr(b []byte, network string) (net.Addr, error) { - return nil, errors.New("not implemented") + return nil, errNotImplemented } func getsockopt(s uintptr, level, name int, b []byte) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func setsockopt(s uintptr, level, name int, b []byte) error { - return errors.New("not implemented") + return errNotImplemented } func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_windows.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_windows.go index 54a470ebe34..d556a446157 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -5,9 +5,10 @@ package socket import ( - "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) func probeProtocolStack() int { @@ -16,11 +17,11 @@ func probeProtocolStack() int { } const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x17 + sysAF_UNSPEC = windows.AF_UNSPEC + sysAF_INET = windows.AF_INET + sysAF_INET6 = windows.AF_INET6 - sysSOCK_RAW = 0x3 + sysSOCK_RAW = windows.SOCK_RAW ) type sockaddrInet struct { @@ -54,17 +55,17 @@ func setsockopt(s uintptr, level, name int, b []byte) error { } func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go new file mode 100644 index 00000000000..813385a98b1 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -0,0 +1,61 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x38 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go index 26f8feff3a1..083bda51c3c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go index e2987f7db82..55c6c9f5770 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go index 26f8feff3a1..083bda51c3c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go index e2987f7db82..55c6c9f5770 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go index c582abd57df..8b7d161d7d5 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go index 04a24886c7a..3e71ff57438 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go index 35c7cb9c953..238d90de626 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go index 04a24886c7a..3e71ff57438 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go new file mode 100644 index 00000000000..238d90de626 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go index 430206930b8..72d8b25421b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go index 430206930b8..72d8b25421b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go index 430206930b8..72d8b25421b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go index 430206930b8..72d8b25421b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go new file mode 100644 index 00000000000..dbff234fbdb --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go index 1502f6c5529..3545319ae91 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go index db60491fe37..bf8f47c88c7 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go index 2a1a79985a0..a46eff99128 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go index db60491fe37..bf8f47c88c7 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go new file mode 100644 index 00000000000..a46eff99128 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go index 1c836361e82..73655a14c4c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go index a6c0bf464a1..0a4de80f235 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go index 1c836361e82..73655a14c4c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go new file mode 100644 index 00000000000..0a4de80f235 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go index 327c63290cd..353cd5fb4ec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_solaris.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1a - - sysSOCK_RAW = 0x4 -) - type iovec struct { Base *int8 Len uint64 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/batch.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/batch.go index 5ce9b3583cc..1a3a4fc0c10 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/batch.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/batch.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - package ipv4 import ( @@ -91,6 +89,9 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { n = 0 err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } return n, err } } @@ -154,6 +155,9 @@ func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { n = 0 err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } return n, err } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_bsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_bsd.go index 77e7ad5bed7..19845c55b15 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_stub.go index 5a2f7d8d3c9..a0c049d683a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_unix.go index e1ae8167b3a..b27fa4903a9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_windows.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_windows.go index ce55c66447d..82c6306421b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/control_windows.go @@ -4,13 +4,9 @@ package ipv4 -import ( - "syscall" - - "golang.org/x/net/internal/socket" -) +import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return syscall.EWINDOWS + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/defs_aix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/defs_aix.go new file mode 100644 index 00000000000..0f37211c641 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/defs_aix.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + // IP_RECVIF is defined on AIX but doesn't work. + // IP_RECVINTERFACE must be used instead. + sysIP_RECVIF = C.IP_RECVINTERFACE + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/dgramopt.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/dgramopt.go index 36764492d9d..c191c22aba4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/dgramopt.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -18,7 +18,7 @@ func (c *dgramOpt) MulticastTTL() (int, error) { } so, ok := sockOpts[ssoMulticastTTL] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -31,7 +31,7 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { } so, ok := sockOpts[ssoMulticastTTL] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, ttl) } @@ -44,7 +44,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getMulticastInterface(c.Conn) } @@ -57,7 +57,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setMulticastInterface(c.Conn, ifi) } @@ -70,7 +70,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return false, errOpNoSupport + return false, errNotImplemented } on, err := so.GetInt(c.Conn) if err != nil { @@ -87,7 +87,7 @@ func (c *dgramOpt) 
SetMulticastLoopback(on bool) error { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, boolint(on)) } @@ -107,7 +107,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoJoinGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -125,7 +125,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoLeaveGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -146,7 +146,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -167,7 +167,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -189,7 +189,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -210,7 +210,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -231,7 +231,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getICMPFilter(c.Conn) } @@ -244,7 +244,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setICMPFilter(c.Conn, f) } @@ -258,7 +258,7 @@ func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { } so, ok := sockOpts[ssoAttachFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setBPF(c.Conn, filter) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/doc.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/doc.go index 863d55b8d67..24583497993 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/doc.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/doc.go @@ -209,7 +209,7 @@ // LeaveSourceSpecificGroup for the operation known as "include" mode, // // ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} -// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} // if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { // // error handling // } @@ -241,5 +241,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" -// BUG(mikio): This package is not implemented on AIX, JS, NaCl and -// Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. 
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/endpoint.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/endpoint.go index 5009463787e..4a6d7a85ee6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/endpoint.go @@ -177,7 +177,7 @@ func NewRawConn(c net.PacketConn) (*RawConn, error) { } so, ok := sockOpts[ssoHeaderPrepend] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/genericopt.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/genericopt.go index 587ae4a1944..51c12371eb4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/genericopt.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/genericopt.go @@ -11,7 +11,7 @@ func (c *genericOpt) TOS() (int, error) { } so, ok := sockOpts[ssoTOS] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -24,7 +24,7 @@ func (c *genericOpt) SetTOS(tos int) error { } so, ok := sockOpts[ssoTOS] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, tos) } @@ -36,7 +36,7 @@ func (c *genericOpt) TTL() (int, error) { } so, ok := sockOpts[ssoTTL] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -49,7 +49,7 @@ func (c *genericOpt) SetTTL(ttl int) error { } so, ok := sockOpts[ssoTTL] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, ttl) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/header.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/header.go index a8c8f7a6c85..701bd4b22d8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/header.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/header.go @@ -57,7 +57,7 @@ func (h *Header) String() string { // This may differ from the wire format, depending on the system. func (h *Header) Marshal() ([]byte, error) { if h == nil { - return nil, errInvalidConn + return nil, errNilHeader } if h.Len < HeaderLen { return nil, errHeaderTooShort @@ -107,12 +107,15 @@ func (h *Header) Marshal() ([]byte, error) { // local system. // This may differ from the wire format, depending on the system. 
func (h *Header) Parse(b []byte) error { - if h == nil || len(b) < HeaderLen { + if h == nil || b == nil { + return errNilHeader + } + if len(b) < HeaderLen { return errHeaderTooShort } hdrlen := int(b[0]&0x0f) << 2 - if hdrlen > len(b) { - return errBufferTooShort + if len(b) < hdrlen { + return errExtHeaderTooShort } h.Version = int(b[0] >> 4) h.Len = hdrlen diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/helper.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/helper.go index 8d8ff98e94a..b494a2cde46 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/helper.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/helper.go @@ -7,23 +7,39 @@ package ipv4 import ( "errors" "net" + "runtime" + + "golang.org/x/net/internal/socket" ) var ( errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errMissingHeader = errors.New("missing header") + errNilHeader = errors.New("nil header") errHeaderTooShort = errors.New("header too short") - errBufferTooShort = errors.New("buffer too short") + errExtHeaderTooShort = errors.New("extension header too short") errInvalidConnType = errors.New("invalid conn type") - errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") errNoSuchMulticastInterface = errors.New("no such multicast interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) - // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. - freebsdVersion uint32 + // See https://www.freebsd.org/doc/en/books/porters-handbook/versions.html. + freebsdVersion uint32 + compatFreeBSD32 bool // 386 emulation on amd64 ) +// See golang.org/issue/30899. +func adjustFreeBSD32(m *socket.Message) { + // FreeBSD 12.0-RELEASE is affected by https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236737 + if 1200086 <= freebsdVersion && freebsdVersion < 1201000 { + l := (m.NN + 4 - 1) &^ (4 - 1) + if m.NN < l && l <= len(m.OOB) { + m.NN = l + } + } +} + func boolint(b bool) int { if b { return 1 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet.go index 966bb776f56..7d784e06dd0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet.go @@ -29,7 +29,35 @@ func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMess if !c.ok() { return nil, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := 
m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return } func slicePacket(b []byte) (h, p []byte, err error) { @@ -64,5 +92,26 @@ func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { return errInvalidConn } - return c.writeTo(h, p, cm) + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_8.go deleted file mode 100644 index b47d186834d..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_8.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package ipv4 - -import "net" - -func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - n, nn, _, src, err := c.ReadMsgIP(b, oob) - if err != nil { - return nil, nil, nil, err - } - var hs []byte - if hs, p, err = slicePacket(b[:n]); err != nil { - return nil, nil, nil, err - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, err - } - if nn > 0 { - cm = new(ControlMessage) - if err := cm.Parse(oob[:nn]); err != nil { - return nil, nil, nil, err - } - } - if src != nil && cm != nil { - cm.Src = src.IP - } - return -} - -func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { - oob := cm.Marshal() - wh, err := h.Marshal() - if err != nil { - return err - } - dst := new(net.IPAddr) - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - wh = append(wh, p...) - _, _, err = c.WriteMsgIP(wh, oob, dst) - return err -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_9.go deleted file mode 100644 index 082c36d73e1..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/packet_go1_9.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 - -package ipv4 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { - c.rawOpt.RLock() - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - if err := c.RecvMsg(&m, 0); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - var hs []byte - if hs, p, err = slicePacket(b[:m.N]); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - } - if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { - cm.Src = src.IP - } - return -} - -func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { - m := socket.Message{ - OOB: cm.Marshal(), - } - wh, err := h.Marshal() - if err != nil { - return err - } - m.Buffers = [][]byte{wh, p} - dst := new(net.IPAddr) - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - m.Addr = dst - if err := c.SendMsg(&m, 0); err != nil { - return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return nil -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg.go index a7c892dc444..e7614661d7b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -2,11 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -16,7 +20,45 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. 
if !c.ok() { return 0, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil } // WriteTo writes a payload of the IPv4 datagram, to the destination @@ -29,5 +71,14 @@ func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n if !c.ok() { return 0, errInvalidConn } - return c.writeTo(b, cm, dst) + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go deleted file mode 100644 index 15a27b7a001..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv4 - -import "net" - -func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - var nn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - nb := make([]byte, maxHeaderLen+len(b)) - if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { - return 0, nil, nil, err - } - hdrlen := int(nb[0]&0x0f) << 2 - copy(b, nb[hdrlen:]) - n -= hdrlen - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} - } - if nn > 0 { - cm = new(ControlMessage) - if err = cm.Parse(oob[:nn]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - } - if cm != nil { - cm.Src = netAddrToIP4(src) - } - return -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { - oob := cm.Marshal() - if dst == nil { - return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} - } - return -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go deleted file mode 100644 index aab3b224e57..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv4 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { - c.rawOpt.RLock() - m := socket.Message{ - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - switch c.PacketConn.(type) { - case *net.UDPConn: - m.Buffers = [][]byte{b} - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - case *net.IPConn: - h := make([]byte, HeaderLen) - m.Buffers = [][]byte{h, b} - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - hdrlen := int(h[0]&0x0f) << 2 - if hdrlen > len(h) { - d := hdrlen - len(h) - copy(b, b[d:]) - m.N -= d - } else { - m.N -= hdrlen - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} - } - var cm *ControlMessage - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - cm.Src = netAddrToIP4(m.Addr) - } - return m.N, cm, m.Addr, nil -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: cm.Marshal(), - Addr: dst, - } - err := c.SendMsg(&m, 0) - if err != nil { - err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return m.N, err -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index d57f05c1077..1116256f245 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_posix.go index e96955bc188..dea64519d8e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv4 @@ -39,7 +39,7 @@ func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { return nil, err } if n != sizeofICMPFilter { - return nil, errOpNoSupport + return nil, errNotImplemented } return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil } @@ -58,7 +58,7 @@ func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) erro case ssoTypeGroupReq: return so.setGroupReq(c, ifi, grp) default: - return errOpNoSupport + return errNotImplemented } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 23249b782e3..37d4806b342 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 @@ -14,29 +14,29 @@ import ( ) func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_aix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_aix.go new file mode 100644 index 00000000000..3d1201e6d7c --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 0388cba00c3..c5eaafe96be 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go index f3919208b6e..6dc339ce67a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows package ipv4 @@ -13,13 +13,13 @@ import ( ) func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index 0711d3d786a..48ef55624ec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go index 9a2132093da..5c98642716b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -12,5 +12,5 @@ import ( ) func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_freebsd.go index b80032454a5..482873d9a15 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_freebsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -50,7 +50,7 @@ func init() { archs, _ := syscall.Sysctl("kern.supported_archs") for _, s := range strings.Fields(archs) { if s == "amd64" { - freebsd32o64 = true + compatFreeBSD32 = true break } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq.go index ae5704e77a2..eeced7f3138 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -13,8 +13,6 @@ import ( "golang.org/x/net/internal/socket" ) -var freebsd32o64 bool - func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { var gr groupReq if ifi != nil { @@ -22,7 +20,7 @@ func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) e } gr.setGroup(grp) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupReq + 4]byte s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) copy(d[:4], s[:4]) @@ -41,7 +39,7 @@ func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, sr } gsr.setSourceGroup(grp, src) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupSourceReq + 4]byte s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) copy(d[:4], s[:4]) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go index e6b7623d0d5..c0921674b05 100644 --- 
a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_stub.go index 4f076473bd1..b9c85b334fc 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go new file mode 100644 index 00000000000..c741d5c8ea9 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -0,0 +1,33 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x20 + sysIP_RECVTTL = 0x22 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_darwin.go index c07cc883fc3..e05a251ba07 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go index c4365e9e712..6d65e9fcb81 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go index 8c4aec94c8a..136e2b8f1d6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_freebsd.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go index 4b10b7c575f..4f730f19ef3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go index 4b10b7c575f..4f730f19ef3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_386.go index c0260f0ce34..43ef8e59225 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go index c0260f0ce34..43ef8e59225 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go index c0260f0ce34..43ef8e59225 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go index c0260f0ce34..43ef8e59225 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go index f65bd9a7a68..fa1b6bc61de 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go new file mode 100644 index 00000000000..0c0d48012f7 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -0,0 +1,151 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go index 9c967eaa642..ee8204da460 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_netbsd.go index fd3624d93c4..8cfc648ad7e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_netbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_openbsd.go index 12f36be759e..37629cb0ab5 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_openbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_solaris.go index 0a3875cc41a..cb80a308b0e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_solaris.go package ipv4 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/batch.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/batch.go index 10d64924515..2ccb9849c78 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/batch.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/batch.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - package ipv6 import ( diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index eec529c205e..8c221b59895 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_stub.go index a045f28f74c..1d773cbcc8e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_unix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_unix.go index 66515060a88..0971a008bf4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_windows.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_windows.go index ef2563b3fc6..8882d81934d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/control_windows.go @@ -4,13 +4,9 @@ package ipv6 -import ( - "syscall" - - "golang.org/x/net/internal/socket" -) +import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return syscall.EWINDOWS + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/defs_aix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/defs_aix.go new file mode 100644 index 00000000000..ea396a3cb8a --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/defs_aix.go @@ -0,0 +1,82 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/dgramopt.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/dgramopt.go index eea4fde2562..1f422e71dc3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -18,7 +18,7 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -31,7 +31,7 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, hoplim) } @@ -44,7 +44,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getMulticastInterface(c.Conn) } @@ -57,7 +57,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setMulticastInterface(c.Conn, ifi) } @@ -70,7 +70,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return false, errOpNoSupport + return false, errNotImplemented } on, err := so.GetInt(c.Conn) if err != nil { @@ -87,7 +87,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { } so, 
ok := sockOpts[ssoMulticastLoopback] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, boolint(on)) } @@ -107,7 +107,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoJoinGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -125,7 +125,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoLeaveGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -146,7 +146,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -167,7 +167,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -189,7 +189,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -210,7 +210,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -233,7 +233,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { } so, ok := sockOpts[ssoChecksum] if !ok { - return false, 0, errOpNoSupport + return false, 0, errNotImplemented } offset, err = so.GetInt(c.Conn) if err != nil { @@ -254,7 +254,7 @@ func (c *dgramOpt) SetChecksum(on bool, offset int) error { } so, ok := sockOpts[ssoChecksum] if !ok { - return errOpNoSupport + return errNotImplemented } if !on { offset = -1 @@ -269,7 +269,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getICMPFilter(c.Conn) } @@ -281,7 +281,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setICMPFilter(c.Conn, f) } @@ -295,7 +295,7 @@ func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { } so, ok := sockOpts[ssoAttachFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setBPF(c.Conn, filter) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/doc.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/doc.go index d38ea0da6c2..e0be9d50d70 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/doc.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/doc.go @@ -240,5 +240,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" -// BUG(mikio): This package is not implemented on AIX, JS, NaCl and -// Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. 
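Note on the x/net refactor carried by this vendor bump: the golang.org/x/net ipv4 and ipv6 hunks above fold the old go1.8/go1.9 readFrom/writeTo shims into a single socket.Message-based path, add aix to the build tags, rename freebsd32o64 to compatFreeBSD32, and replace the generic errOpNoSupport sentinel with a per-platform errNotImplemented (see the helper.go hunk that follows). The sketch below restates that error pattern as a hypothetical standalone program for illustration only; it assumes nothing beyond the Go standard library and is not part of the patch.

	package main

	import (
		"errors"
		"fmt"
		"runtime"
	)

	// errNotImplemented mirrors the sentinel introduced in this diff: the
	// message names the platform, so stubbed sockopt paths report e.g.
	// "not implemented on js/wasm" instead of a bare "operation not supported".
	var errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH)

	func main() {
		// In the updated vendor code every stubbed accessor
		// (setGroup, getICMPFilter, setBPF, ...) returns this one
		// descriptive error on unsupported platforms.
		fmt.Println(errNotImplemented)
	}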
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/endpoint.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/endpoint.go index 93257563963..f534a0bf38d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/endpoint.go @@ -37,7 +37,7 @@ func (c *Conn) PathMTU() (int, error) { } so, ok := sockOpts[ssoPathMTU] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } _, mtu, err := so.getMTUInfo(c.Conn) if err != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/genericopt.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/genericopt.go index 1a18f7549ab..0326aed6def 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/genericopt.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/genericopt.go @@ -12,7 +12,7 @@ func (c *genericOpt) TrafficClass() (int, error) { } so, ok := sockOpts[ssoTrafficClass] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -25,7 +25,7 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { } so, ok := sockOpts[ssoTrafficClass] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, tclass) } @@ -37,7 +37,7 @@ func (c *genericOpt) HopLimit() (int, error) { } so, ok := sockOpts[ssoHopLimit] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -50,7 +50,7 @@ func (c *genericOpt) SetHopLimit(hoplim int) error { } so, ok := sockOpts[ssoHopLimit] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, hoplim) } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/helper.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/helper.go index 7ac5352294b..f767b1f5ddd 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/helper.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/helper.go @@ -7,6 +7,7 @@ package ipv6 import ( "errors" "net" + "runtime" ) var ( @@ -14,8 +15,8 @@ var ( errMissingAddress = errors.New("missing address") errHeaderTooShort = errors.New("header too short") errInvalidConnType = errors.New("invalid conn type") - errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) ) func boolint(b bool) int { diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_bsd.go index e1a791de46e..b03025cdccf 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_stub.go index c4b9be6dbff..370e51acd1f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg.go index e17847d0be3..284a04278ed 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -2,11 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -16,7 +20,32 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. if !c.ok() { return 0, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil } // WriteTo writes a payload of the IPv6 datagram, to the destination @@ -28,5 +57,14 @@ func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n if !c.ok() { return 0, errInvalidConn } - return c.writeTo(b, cm, dst) + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go deleted file mode 100644 index a48a6ed64af..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv6 - -import "net" - -func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - var nn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} - } - if nn > 0 { - cm = new(ControlMessage) - if err = cm.Parse(oob[:nn]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - } - if cm != nil { - cm.Src = netAddrToIP16(src) - } - return -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { - oob := cm.Marshal() - if dst == nil { - return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} - } - return -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go deleted file mode 100644 index fb196ed804b..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv6 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { - c.rawOpt.RLock() - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - switch c.PacketConn.(type) { - case *net.UDPConn: - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - case *net.IPConn: - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} - } - var cm *ControlMessage - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - cm.Src = netAddrToIP16(m.Addr) - } - return m.N, cm, m.Addr, nil -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: cm.Marshal(), - Addr: dst, - } - err := c.SendMsg(&m, 0) - if err != nil { - err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return m.N, err -} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index bfb54478407..c5a4c967527 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 0eac86eb8cc..824c623ccef 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 import ( "net" + "runtime" "unsafe" "golang.org/x/net/bpf" @@ -37,7 +38,7 @@ func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { return nil, err } if n != sizeofICMPv6Filter { - return nil, errOpNoSupport + return nil, errNotImplemented } return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil } @@ -54,10 +55,11 @@ func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { return nil, 0, err } if n != sizeofIPv6Mtuinfo { - return nil, 0, errOpNoSupport + return nil, 0, errNotImplemented } mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) - if mi.Addr.Scope_id == 0 { + if mi.Addr.Scope_id == 0 || runtime.GOOS == "aix" { + // AIX kernel might return a wrong address. return nil, int(mi.Mtu), nil } ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) @@ -74,7 +76,7 @@ func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) erro case ssoTypeGroupReq: return so.setGroupReq(c, ifi, grp) default: - return errOpNoSupport + return errNotImplemented } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_stub.go index 1f4a273e445..0a87a93bbd4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 @@ -14,33 +14,33 @@ import ( ) func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { - return nil, 0, errOpNoSupport + return nil, 0, errNotImplemented } func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_aix.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_aix.go new file mode 100644 index 00000000000..bce7091fb0b --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq.go index b0510c0b5d5..8c3934c3eec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a 
BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go index eece96187b8..87ae4818144 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 @@ -13,5 +13,5 @@ import ( ) func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go index 676bea555f0..eb9f8316237 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -12,5 +12,5 @@ import ( ) func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_freebsd.go index e9349dc2cc2..85a9f5d07de 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_freebsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -51,7 +51,7 @@ func init() { archs, _ := syscall.Sysctl("kern.supported_archs") for _, s := range strings.Fields(archs) { if s == "amd64" { - freebsd32o64 = true + compatFreeBSD32 = true break } } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index add8ccc0b1b..9b52e978cbd 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin freebsd linux solaris +// +build aix darwin freebsd linux solaris package ipv6 @@ -13,7 +13,7 @@ import ( "golang.org/x/net/internal/socket" ) -var freebsd32o64 bool +var compatFreeBSD32 bool // 386 emulation on amd64 func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { var gr groupReq @@ -22,7 +22,7 @@ func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) e } gr.setGroup(grp) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupReq + 4]byte s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) copy(d[:4], s[:4]) @@ -41,7 +41,7 @@ func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, sr } gsr.setSourceGroup(grp, src) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupSourceReq + 4]byte s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) copy(d[:4], s[:4]) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index 581ee490ff2..d5bc1108c5f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!freebsd,!linux,!solaris +// +build !aix,!darwin,!freebsd,!linux,!solaris package ipv6 @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_stub.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_stub.go index b845388ea4a..4f252d09f6c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go new file mode 100644 index 00000000000..bf44e338bd8 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -0,0 +1,103 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
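
The setGroupReq/setGroupSourceReq code above (including the compatFreeBSD32 rename) sits behind PacketConn's group-membership methods. A minimal sketch, assuming an interface named "eth0" and an arbitrary link-local multicast group:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	en0, err := net.InterfaceByName("eth0") // assumption: interface name
	if err != nil {
		log.Fatal(err)
	}
	c, err := net.ListenPacket("udp6", "[::]:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	grp := &net.UDPAddr{IP: net.ParseIP("ff02::114")} // arbitrary group
	if err := p.JoinGroup(en0, grp); err != nil {
		log.Fatal(err)
	}
	defer p.LeaveGroup(en0, grp)
}
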
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysICMP6_FILTER = 0x26 + + sysIPV6_CHECKSUM = 0x27 + sysIPV6_V6ONLY = 0x25 + + sysIPV6_RTHDRDSTOPTS = 0x37 + + sysIPV6_RECVPKTINFO = 0x23 + sysIPV6_RECVHOPLIMIT = 0x29 + sysIPV6_RECVRTHDR = 0x33 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_RECVDSTOPTS = 0x38 + + sysIPV6_USE_MIN_MTU = 0x2c + sysIPV6_RECVPATHMTU = 0x2f + sysIPV6_PATHMTU = 0x2e + + sysIPV6_PKTINFO = 0x21 + sysIPV6_HOPLIMIT = 0x28 + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x34 + sysIPV6_DSTOPTS = 0x36 + sysIPV6_RTHDR = 0x32 + + sysIPV6_RECVTCLASS = 0x2a + + sysIPV6_TCLASS = 0x2b + sysIPV6_DONTFRAG = 0x2d + + sizeofSockaddrStorage = 0x508 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x510 + sizeofGroupSourceReq = 0xa18 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + X__ss_len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [1265]uint8 + Pad_cgo_0 [7]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_darwin.go index 6aab1dfab7c..555744afd71 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go index d2de804d88c..cf3cc1024ac 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go index 919e572d4a1..73f31b260ea 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_freebsd.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go index cb8141f9c65..490ce7cf104 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go index cb8141f9c65..490ce7cf104 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_386.go index 73aa8c6dfce..14155dec21e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go index 73aa8c6dfce..14155dec21e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go index 73aa8c6dfce..14155dec21e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go index 73aa8c6dfce..14155dec21e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go index c9bf6a87ef7..a51e142b42a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go new file mode 100644 index 00000000000..1ee237b2b76 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -0,0 +1,173 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git 
a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go index b64f0157d7a..9566d764685 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_netbsd.go index bcada13b7a7..e39571e0725 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_netbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_openbsd.go index 86cf3c63799..cc1899a630c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_openbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_solaris.go index cf1837dd2af..690eef9341a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_solaris.go package ipv6 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/dial.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 00000000000..811c2e4e962 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. 
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/direct.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/direct.go index 4c5ad88b1e7..3d66bdef9d7 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/direct.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/direct.go @@ -5,14 +5,27 @@ package proxy import ( + "context" "net" ) type direct struct{} -// Direct is a direct proxy: one that makes network connections directly. +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. var Direct = direct{} +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. func (direct) Dial(network, addr string) (net.Conn, error) { return net.Dial(network, addr) } + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go index 0689bb6a70f..573fe79e86e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go @@ -5,6 +5,7 @@ package proxy import ( + "context" "net" "strings" ) @@ -41,6 +42,20 @@ func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { return p.dialerForRequest(host).Dial(network, addr) } +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + func (p *PerHost) dialerForRequest(host string) Dialer { if ip := net.ParseIP(host); ip != nil { for _, net := range p.bypassNetworks { diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/proxy.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/proxy.go index f6026b902c8..9ff4b9a7767 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/proxy.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/proxy.go @@ -15,6 +15,7 @@ import ( ) // A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. type Dialer interface { // Dial connects to the given address via the proxy. Dial(network, addr string) (c net.Conn, err error) @@ -25,21 +26,30 @@ type Auth struct { User, Password string } -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. 
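
The proxy.Dial entry point and dialContext fallback introduced above can be used directly by callers; a minimal sketch in which the target address is only a placeholder and ALL_PROXY/NO_PROXY are read from the environment if set:

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// proxy.Dial consults FromEnvironment and uses DialContext when the
	// selected dialer implements ContextDialer; otherwise it falls back
	// to the goroutine-based dialContext helper shown above.
	conn, err := proxy.Dial(ctx, "tcp", "example.com:80") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
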
func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { allProxy := allProxyEnv.Get() if len(allProxy) == 0 { - return Direct + return forward } proxyURL, err := url.Parse(allProxy) if err != nil { - return Direct + return forward } - proxy, err := FromURL(proxyURL, Direct) + proxy, err := FromURL(proxyURL, forward) if err != nil { - return Direct + return forward } noProxy := noProxyEnv.Get() @@ -47,7 +57,7 @@ func FromEnvironment() Dialer { return proxy } - perHost := NewPerHost(proxy, Direct) + perHost := NewPerHost(proxy, forward) perHost.AddFromString(noProxy) return perHost } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go index 56345ec8b63..c91651f96db 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go @@ -17,8 +17,14 @@ import ( func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { d := socks.NewDialer(network, address) if forward != nil { - d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) { - return forward.Dial(network, address) + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } } } if auth != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/oauth2/google/google.go b/cluster-autoscaler/vendor/golang.org/x/oauth2/google/google.go index 6eb2aa95f5b..81de32b360b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/oauth2/google/google.go +++ b/cluster-autoscaler/vendor/golang.org/x/oauth2/google/google.go @@ -194,9 +194,16 @@ func (cs computeSource) Token() (*oauth2.Token, error) { if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") } - return &oauth2.Token{ + tok := &oauth2.Token{ AccessToken: res.AccessToken, TokenType: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil } diff --git a/cluster-autoscaler/vendor/golang.org/x/oauth2/jwt/jwt.go b/cluster-autoscaler/vendor/golang.org/x/oauth2/jwt/jwt.go index 99f3e0a32c8..b2bf18298b0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/cluster-autoscaler/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -66,6 +66,14 @@ type Config struct { // request. If empty, the value of TokenURL is used as the // intended audience. Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. 
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool } // TokenSource returns a JWT TokenSource using the configuration @@ -97,9 +105,10 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } hc := oauth2.NewClient(js.ctx, nil) claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, } if subject := js.conf.Subject; subject != "" { claimSet.Sub = subject @@ -166,5 +175,11 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } token.Expiry = time.Unix(claimSet.Exp, 0) } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } return token, nil } diff --git a/cluster-autoscaler/vendor/golang.org/x/oauth2/oauth2.go b/cluster-autoscaler/vendor/golang.org/x/oauth2/oauth2.go index 428283f0b01..291df5c833f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/oauth2/oauth2.go +++ b/cluster-autoscaler/vendor/golang.org/x/oauth2/oauth2.go @@ -117,7 +117,7 @@ var ( // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") ) // An AuthCodeOption is passed to Config.AuthCodeURL. diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/README.md b/cluster-autoscaler/vendor/golang.org/x/sys/unix/README.md index 2bf415fb1cf..eb2f78ae295 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/README.md +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/README.md @@ -32,7 +32,7 @@ To build the files for your current OS and architecture, make sure GOOS and GOARCH are set correctly and run `mkall.sh`. This will generate the files for your specific system. Running `mkall.sh -n` shows the commands that will be run. -Requirements: bash, perl, go +Requirements: bash, go ### New Build System (currently for `GOOS == "linux"`) @@ -52,14 +52,14 @@ system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will then generate all of the files for all of the GOOS/GOARCH pairs in the new build system. Running `mkall.sh -n` shows the commands that will be run. -Requirements: bash, perl, go, docker +Requirements: bash, go, docker ## Component files This section describes the various files used in the code generation process. It also contains instructions on how to modify these files to add a new architecture/OS or to add additional syscalls, types, or constants. Note that -if you are using the new build system, the scripts cannot be called normally. +if you are using the new build system, the scripts/programs cannot be called normally. They must be called from within the docker container. ### asm files @@ -81,8 +81,8 @@ each GOOS/GOARCH pair. ### mksysnum -Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl` -for the old system). This script takes in a list of header files containing the +Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go` +for the old system). 
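
The new jwt.Config fields above (PrivateClaims and UseIDToken) are set by callers that need ID tokens rather than access tokens. A hypothetical sketch in which every credential value is a placeholder:

package tokenexample

import (
	"context"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

// fetchIDToken is a hypothetical helper; the email, token URL, and claim
// values are placeholders, not working credentials.
func fetchIDToken(ctx context.Context, pemKey []byte) (*oauth2.Token, error) {
	cfg := &jwt.Config{
		Email:      "svc@project.example",
		PrivateKey: pemKey,
		TokenURL:   "https://oauth2.example/token",
		PrivateClaims: map[string]interface{}{
			"target_audience": "https://api.example",
		},
		// With UseIDToken set, Token() returns the ID token from the
		// response instead of the access token (see the hunk above).
		UseIDToken: true,
	}
	return cfg.TokenSource(ctx).Token()
}
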
This program takes in a list of header files containing the syscall number declarations and parses them to produce the corresponding list of Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated constants. @@ -92,14 +92,14 @@ new installation of the target OS (or updating the source checkouts for the new build system). However, depending on the OS, you make need to update the parsing in mksysnum. -### mksyscall.pl +### mksyscall.go The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are hand-written Go files which implement system calls (for unix, the specific OS, or the specific OS/Architecture pair respectively) that need special handling and list `//sys` comments giving prototypes for ones that can be generated. -The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts +The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts them into syscalls. This requires the name of the prototype in the comment to match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function prototype can be exported (capitalized) or not. @@ -160,7 +160,7 @@ signal numbers, and constants. Generated by `mkerrors.sh` (see above). ### `zsyscall_${GOOS}_${GOARCH}.go` A file containing all the generated syscalls for a specific GOOS and GOARCH. -Generated by `mksyscall.pl` (see above). +Generated by `mksyscall.go` (see above). ### `zsysnum_${GOOS}_${GOARCH}.go` diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/affinity_linux.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/affinity_linux.go index 72afe3338cf..14e4d5caa3e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -91,9 +91,13 @@ func onesCount64(x uint64) int { const m0 = 0x5555555555555555 // 01010101 ... const m1 = 0x3333333333333333 // 00110011 ... const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - const m3 = 0x00ff00ff00ff00ff // etc. - const m4 = 0x0000ffff0000ffff + // Unused in this function, but definitions preserved for + // documentation purposes: + // + // const m3 = 0x00ff00ff00ff00ff // etc. + // const m4 = 0x0000ffff0000ffff + // // Implementation: Parallel summing of adjacent bits. // See "Hacker's Delight", Chap. 5: Counting Bits. // The following pattern shows the general approach: diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s new file mode 100644 index 00000000000..6db717de53c --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64,!gccgo + +#include "textflag.h" + +// +// System calls for linux/riscv64. +// +// Where available, just jump to package syscall's implementation of +// these functions. 
+ +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + CALL runtime·entersyscall(SB) + MOV a1+8(FP), A0 + MOV a2+16(FP), A1 + MOV a3+24(FP), A2 + MOV $0, A3 + MOV $0, A4 + MOV $0, A5 + MOV $0, A6 + MOV trap+0(FP), A7 // syscall entry + ECALL + MOV A0, r1+32(FP) // r1 + MOV A1, r2+40(FP) // r2 + CALL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOV a1+8(FP), A0 + MOV a2+16(FP), A1 + MOV a3+24(FP), A2 + MOV ZERO, A3 + MOV ZERO, A4 + MOV ZERO, A5 + MOV trap+0(FP), A7 // syscall entry + ECALL + MOV A0, r1+32(FP) + MOV A1, r2+40(FP) + RET diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s new file mode 100644 index 00000000000..0cedea3d39d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for arm64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/dirent.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/dirent.go index 4407c505a36..304016b6886 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/dirent.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/dirent.go @@ -2,16 +2,101 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix -import "syscall" +import "unsafe" + +// readInt returns the size-bytes unsigned integer in native byte order at offset off. 
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) { + if len(b) < int(off+size) { + return 0, false + } + if isBigEndian { + return readIntBE(b[off:], size), true + } + return readIntLE(b[off:], size), true +} + +func readIntBE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[1]) | uint64(b[0])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} + +func readIntLE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} // ParseDirent parses up to max directory entries in buf, // appending the names to names. It returns the number of // bytes consumed from buf, the number of entries added // to names, and the new names slice. func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { - return syscall.ParseDirent(buf, max, names) + origlen := len(buf) + count = 0 + for max != 0 && len(buf) > 0 { + reclen, ok := direntReclen(buf) + if !ok || reclen > uint64(len(buf)) { + return origlen, count, names + } + rec := buf[:reclen] + buf = buf[reclen:] + ino, ok := direntIno(rec) + if !ok { + break + } + if ino == 0 { // File absent in directory. + continue + } + const namoff = uint64(unsafe.Offsetof(Dirent{}.Name)) + namlen, ok := direntNamlen(rec) + if !ok || namoff+namlen > uint64(len(rec)) { + break + } + name := rec[namoff : namoff+namlen] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + max-- + count++ + names = append(names, string(name)) + } + return origlen - len(buf), count, names } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/endian_little.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/endian_little.go index 085df2d8dd7..bcdb5d30eb9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/endian_little.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
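
The in-package ParseDirent implementation above pairs with Getdents on Linux; a minimal sketch (the directory path is arbitrary):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	n, err := unix.Getdents(fd, buf)
	if err != nil {
		log.Fatal(err)
	}
	// max = -1 parses every entry in the buffer, skipping "." and "..".
	_, count, names := unix.ParseDirent(buf[:n], -1, nil)
	fmt.Println(count, names)
}
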
// -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le +// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 package unix diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh index d74115a8a18..5a22eca9676 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh @@ -105,25 +105,25 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_386) @@ -146,31 +146,46 @@ netbsd_arm) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; +netbsd_arm64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -netbsd" + mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; openbsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32 -openbsd" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) mkerrors="$mkerrors -m64" mksyscall="go run mksyscall.go -openbsd" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -openbsd -arm" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. 
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_arm64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; solaris_amd64) - mksyscall="./mksyscall_solaris.pl" + mksyscall="go run mksyscall_solaris.go" mkerrors="$mkerrors -m64" mksysnum= mktypes="GOARCH=$GOARCH go tool cgo -godefs" @@ -207,8 +222,6 @@ esac esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then - echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; + if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi - fi ) | $run diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh index 6a23484e5b5..14624b9531b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -182,6 +182,8 @@ struct ltchars { #include #include #include +#include +#include #include #include #include @@ -192,10 +194,12 @@ struct ltchars { #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -221,6 +225,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -431,7 +436,9 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^LO_(KEY|NAME)_SIZE$/ || + $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 == "ICMPV6_FILTER" || @@ -464,7 +471,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || @@ -498,9 +505,11 @@ ccflags="$@" $2 ~ /^NFN/ || $2 ~ /^XDP_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || + $2 ~ /^CRYPTO_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || + $2 ~ /^FAN_|FANOTIFY_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkpost.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkpost.go index 9feddd00c4b..eb4332059ae 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkpost.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkpost.go @@ -42,9 +42,16 @@ func main() { log.Fatal(err) } + if goos == "aix" { + // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t + // to avoid having both StTimespec and Timespec. 
+ sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) + b = sttimespec.ReplaceAll(b, []byte("Timespec")) + } + // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) // Intentionally export __fds_bits field in FdSet fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) @@ -96,6 +103,15 @@ func main() { cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) + // Rename Stat_t time fields + if goos == "freebsd" && goarch == "386" { + // Hide Stat_t.[AMCB]tim_ext fields + renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) + b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) + } + renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) + b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) + // gofmt b, err = format.Source(b) if err != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall.go index e06e4253ea3..e4af9424e97 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall.go @@ -153,6 +153,11 @@ func main() { } funct, inps, outps, sysname := f[2], f[3], f[4], f[5] + // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. + if goos == "darwin" && !libc && funct == "ClockGettime" { + continue + } + // Split argument lists on comma. in := parseParamList(inps) out := parseParamList(outps) @@ -228,7 +233,7 @@ func main() { } else { args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) } - } else if p.Type == "int64" && endianness != "" { + } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { if len(args)%2 == 1 && *arm { // arm abi specifies 64-bit argument uses // (even, odd) pair diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go index f2c58fb7cc7..3be3cdfc3b6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go @@ -214,6 +214,11 @@ func main() { } if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + cExtern += "#define c_select select\n" + } // Imports of system calls from libc cExtern += fmt.Sprintf("%s %s", cRettype, sysname) cIn := strings.Join(cIn, ", ") @@ -328,7 +333,13 @@ func main() { } else { call += "" } - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) + } else { + call += fmt.Sprintf("C.%s(%s)", sysname, arglist) + } // Assign return values. 
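
The Stat_t time-field renaming performed by mkpost.go above means callers read Atim/Mtim/Ctim uniformly across GOOS/GOARCH; a minimal sketch on Linux (the path is arbitrary):

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/etc/hosts", &st); err != nil { // arbitrary path
		log.Fatal(err)
	}
	// After the rename, timestamps are exposed as Timespec fields named
	// Atim, Mtim, and Ctim on every platform.
	fmt.Println("atime:", time.Unix(st.Atim.Unix()))
	fmt.Println("mtime:", time.Unix(st.Mtim.Unix()))
}
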
body := "" diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go index 45b4429088f..c960099517a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go @@ -282,6 +282,11 @@ func main() { if !onlyCommon { // GCCGO Prototype Generation // Imports of system calls from libc + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + cExtern += "#define c_select select\n" + } cExtern += fmt.Sprintf("%s %s", cRettype, sysname) cIn := strings.Join(cIn, ", ") cExtern += fmt.Sprintf("(%s);\n", cIn) @@ -490,7 +495,14 @@ func main() { // GCCGO function generation argsgccgolist := strings.Join(argsgccgo, ", ") - callgccgo := fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) + var callgccgo string + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) + } else { + callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) + } textgccgo += callProto textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) textgccgo += "\te1 = syscall.GetErrno()\n" diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.go new file mode 100644 index 00000000000..3d864738b69 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.go @@ -0,0 +1,335 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* + This program reads a file containing function prototypes + (like syscall_solaris.go) and generates system call bodies. + The prototypes are marked by lines beginning with "//sys" + and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. 
+ * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + text := "" + dynimports := "" + linknames := "" + var vars []string + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // So file name. + if modname == "" { + modname = "libc" + } + + // System call name. + if sysname == "" { + sysname = funct + } + + // System call pointer variable name. 
+ sysvarname := fmt.Sprintf("proc%s", sysname) + + strconvfunc := "BytePtrFromString" + strconvtype := "*byte" + + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + // Runtime import of function to allow cross-platform builds. + dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) + // Link symbol to proc address variable. + linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) + // Library proc address variable. + vars = append(vars, sysvarname) + + // Go function header. + outlist := strings.Join(out, ", ") + if outlist != "" { + outlist = fmt.Sprintf(" (%s)", outlist) + } + if text != "" { + text += "\n" + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + continue + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. + text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && endianness != "" { + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else if p.Type == "bool" { + text += fmt.Sprintf("\tvar _p%d uint32\n", n) + text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) + n++ + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + nargs := len(args) + + // Determine which form to use; pad args with zeros. + asm := "sysvicall6" + if nonblock != nil { + asm = "rawSysvicall6" + } + if len(args) <= 6 { + for len(args) < 6 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) + os.Exit(1) + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) + + // Assign return values. 
+ body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + ret[2] = reg + doErrno = true + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%d != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. + if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) + os.Exit(1) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + text += body + + if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + vardecls := "\t" + strings.Join(vars, ",\n\t") + vardecls += " syscallFunc" + fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + +import ( + "syscall" + "unsafe" +) +%s +%s +%s +var ( +%s +) + +%s +` diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl deleted file mode 100644 index a354df5a6bf..00000000000 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_solaris.go) and generates system call bodies. -# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. -# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named err. -# * If go func name needs to be different than its libc name, -# * or the function is not in libc, name could be specified -# * at the end, after "=" sign, like -# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - -use strict; - -my $cmdline = "mksyscall_solaris.pl " . 
join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $tags = ""; # build tags - -binmode STDOUT; - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-tags") { - shift; - $tags = $ARGV[0]; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall_solaris.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; - exit 1; -} - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $package = ""; -my $text = ""; -my $dynimports = ""; -my $linknames = ""; -my @vars = (); -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - $package = $1 if !$package && /^package (\S+)$/; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, err error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # So file name. - if($modname eq "") { - $modname = "libc"; - } - - # System call name. - if($sysname eq "") { - $sysname = "$func"; - } - - # System call pointer variable name. - my $sysvarname = "proc$sysname"; - - my $strconvfunc = "BytePtrFromString"; - my $strconvtype = "*byte"; - - $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. - - # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; - # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; - # Library proc address variable. - push @vars, $sysvarname; - - # Go function header. - $out = join(', ', @out); - if($out ne "") { - $out = " ($out)"; - } - if($text ne "") { - $text .= "\n" - } - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, _ = $strconvfunc($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass nil in that case. - $text .= "\tvar _p$n *$1\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && $_32bit ne "") { - if($_32bit eq "big-endian") { - push @args, "uintptr($name >> 32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name >> 32)"; - } - } elsif($type eq "bool") { - $text .= "\tvar _p$n uint32\n"; - $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; - push @args, "uintptr(_p$n)"; - $n++; - } else { - push @args, "uintptr($name)"; - } - } - my $nargs = @args; - - # Determine which form to use; pad args with zeros. - my $asm = "sysvicall6"; - if ($nonblock) { - $asm = "rawSysvicall6"; - } - if(@args <= 6) { - while(@args < 6) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; - - # Assign return values. - my $body = ""; - my $failexpr = ""; - my @ret = ("_", "_", "_"); - my @pout= (); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err") { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. 
- if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1") { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - $text .= $body; - - if ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n"; -} - -if($errors) { - exit 1; -} - -print < "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. -sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (
) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <>= 32 + stat.Mtim.Nsec >>= 32 + stat.Ctim.Nsec >>= 32 +} + +func Fstat(fd int, stat *Stat_t) error { + err := fstat(fd, stat) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error { + err := fstatat(dirfd, path, stat, flags) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Lstat(path string, stat *Stat_t) error { + err := lstat(path, stat) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Stat(path string, statptr *Stat_t) error { + err := stat(path, statptr) + if err != nil { + return err + } + fixStatTimFields(statptr) + return nil +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_bsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_bsd.go index 33c8b5f0db7..97a8eef6fae 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -63,15 +63,6 @@ func Setgroups(gids []int) (err error) { return setgroups(len(a), &a[0]) } -func ReadDirent(fd int, buf []byte) (n int, err error) { - // Final argument is (basep *uintptr) and the syscall doesn't take nil. - // 64 bits should be enough. (32 bits isn't even on 386). Since the - // actual system call is getdirentries64, 64 is a good guess. - // TODO(rsc): Can we use a single global basep for all calls? - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) -} - // Wait status is 7 bits at bottom, either 0 (exited), // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. 
@@ -86,6 +77,7 @@ const ( shift = 8 exited = 0 + killed = 9 stopped = 0x7F ) @@ -112,6 +104,8 @@ func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } +func (w WaitStatus) Killed() bool { return w&mask == killed && syscall.Signal(w>>shift) != SIGKILL } + func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } func (w WaitStatus) StopSignal() syscall.Signal { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin.go index a2e36888222..3e1cdfb5058 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -77,7 +77,18 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } @@ -144,6 +155,23 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( //sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) +func SysctlClockinfo(name string) (*Clockinfo, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofClockinfo) + var ci Clockinfo + if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofClockinfo { + return nil, EIO + } + return &ci, nil +} + //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 489726fa9bd..cd8be182a5f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 914b89bde5a..d0d07243ce2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 
4a284cf5025..01e8a38a944 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -8,6 +8,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 52dcd88f6bf..e674f81dad7 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -10,6 +10,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 962eee30464..260a400f91d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -57,6 +57,22 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + namlen, ok := direntNamlen(buf) + if !ok { + return 0, false + } + return (16 + namlen + 1 + 7) &^ 7, true +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -269,6 +285,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) //sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go index a7ca1ebea31..329d240b907 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -82,6 +82,18 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -362,7 +374,21 @@ func Getdents(fd int, buf []byte) (n int, err error) { func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { if supportsABI(_ino64First) { - return getdirentries_freebsd12(fd, buf, basep) + if basep == nil || unsafe.Sizeof(*basep) 
== 8 { + return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The freebsd12 syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries_freebsd12(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO + } + return } // The old syscall entries are smaller than the new. Use 1/4 of the original @@ -404,22 +430,22 @@ func roundup(x, y int) int { func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Birthtim: old.Birthtim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), + Dev: uint64(old.Dev), + Ino: uint64(old.Ino), + Nlink: uint64(old.Nlink), + Mode: old.Mode, + Uid: old.Uid, + Gid: old.Gid, + Rdev: uint64(old.Rdev), + Atim: old.Atim, + Mtim: old.Mtim, + Ctim: old.Ctim, + Btim: old.Btim, + Size: old.Size, + Blocks: old.Blocks, + Blksize: old.Blksize, + Flags: old.Flags, + Gen: uint64(old.Gen), } } @@ -507,6 +533,70 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +//sys ptrace(request int, pid int, addr uintptr, data int) (err error) + +func PtraceAttach(pid int) (err error) { + return ptrace(PTRACE_ATTACH, pid, 0, 0) +} + +func PtraceCont(pid int, signal int) (err error) { + return ptrace(PTRACE_CONT, pid, 1, signal) +} + +func PtraceDetach(pid int) (err error) { + return ptrace(PTRACE_DETACH, pid, 1, 0) +} + +func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { + return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) +} + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceGetRegs(pid int, regsout *Reg) (err error) { + return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} + +func PtraceLwpEvents(pid int, enable int) (err error) { + return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) +} + +func PtraceLwpInfo(pid int, info uintptr) (err error) { + return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +} + +func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { + return PtraceIO(PIOD_READ_D, pid, addr, out, SizeofLong) +} + +func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { + return PtraceIO(PIOD_READ_I, pid, addr, out, SizeofLong) +} + +func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { + return PtraceIO(PIOD_WRITE_D, pid, addr, data, SizeofLong) +} + +func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { + return PtraceIO(PIOD_WRITE_I, pid, addr, data, SizeofLong) +} + +func PtraceSetRegs(pid int, regs *Reg) (err error) { + 
return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) +} + +func PtraceSingleStep(pid int) (err error) { + return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) +} + /* * Exposed directly */ @@ -555,7 +645,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go index a07ee49ea39..637b5017b8b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,7 +13,6 @@ package unix import ( "encoding/binary" - "net" "runtime" "syscall" "unsafe" @@ -39,6 +38,20 @@ func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } +//sys FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) +//sys fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) + +func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) (err error) { + if pathname == "" { + return fanotifyMark(fd, flags, mask, dirFd, nil) + } + p, err := BytePtrFromString(pathname) + if err != nil { + return err + } + return fanotifyMark(fd, flags, mask, dirFd, p) +} + //sys fchmodat(dirfd int, path string, mode uint32) (err error) func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -95,6 +108,12 @@ func IoctlGetInt(fd int, req uint) (int, error) { return value, err } +func IoctlGetUint32(fd int, req uint) (uint32, error) { + var value uint32 + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -745,7 +764,7 @@ const px_proto_oe = 0 type SockaddrPPPoE struct { SID uint16 - Remote net.HardwareAddr + Remote []byte Dev string raw RawSockaddrPPPoX } @@ -896,7 +915,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } sa := &SockaddrPPPoE{ SID: binary.BigEndian.Uint16(pp[6:8]), - Remote: net.HardwareAddr(pp[8:14]), + Remote: pp[8:14], } for i := 14; i < 14+IFNAMSIZ; i++ { if pp[i] == 0 { @@ -990,10 +1009,50 @@ func GetsockoptString(fd, level, opt int) (string, error) { return string(buf[:vallen-1]), nil } +func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { + var value TpacketStats + vallen := _Socklen(SizeofTpacketStats) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptTpacketStatsV3(fd, level, opt int) (*TpacketStatsV3, error) { + var value TpacketStatsV3 + vallen := _Socklen(SizeofTpacketStatsV3) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } +func SetsockoptPacketMreq(fd, level, opt int, mreq *PacketMreq) error { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), 
unsafe.Sizeof(*mreq)) +} + +// SetsockoptSockFprog attaches a classic BPF or an extended BPF program to a +// socket to filter incoming packets. See 'man 7 socket' for usage information. +func SetsockoptSockFprog(fd, level, opt int, fprog *SockFprog) error { + return setsockopt(fd, level, opt, unsafe.Pointer(fprog), unsafe.Sizeof(*fprog)) +} + +func SetsockoptCanRawFilter(fd, level, opt int, filter []CanFilter) error { + var p unsafe.Pointer + if len(filter) > 0 { + p = unsafe.Pointer(&filter[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(filter)*SizeofCanFilter)) +} + +func SetsockoptTpacketReq(fd, level, opt int, tp *TpacketReq) error { + return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp)) +} + +func SetsockoptTpacketReq3(fd, level, opt int, tp *TpacketReq3) error { + return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1354,8 +1413,20 @@ func Reboot(cmd int) (err error) { return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "") } -func ReadDirent(fd int, buf []byte) (n int, err error) { - return Getdents(fd, buf) +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } //sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) @@ -1390,6 +1461,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Acct(path string) (err error) //sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) //sys Adjtimex(buf *Timex) (state int, err error) +//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error) +//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) @@ -1477,9 +1550,13 @@ func Setgid(uid int) (err error) { return EOPNOTSUPP } +func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { + return signalfd(fd, sigmask, _C__NSIG/8, flags) +} + //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) -//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4 +//sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4 //sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) @@ -1608,6 +1685,82 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return EACCES } +//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT +//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT + +// fileHandle is the argument to nameToHandleAt and openByHandleAt. 
We +// originally tried to generate it via unix/linux/types.go with "type +// fileHandle C.struct_file_handle" but that generated empty structs +// for mips64 and mips64le. Instead, hard code it for now (it's the +// same everywhere else) until the mips64 generator issue is fixed. +type fileHandle struct { + Bytes uint32 + Type int32 +} + +// FileHandle represents the C struct file_handle used by +// name_to_handle_at (see NameToHandleAt) and open_by_handle_at (see +// OpenByHandleAt). +type FileHandle struct { + *fileHandle +} + +// NewFileHandle constructs a FileHandle. +func NewFileHandle(handleType int32, handle []byte) FileHandle { + const hdrSize = unsafe.Sizeof(fileHandle{}) + buf := make([]byte, hdrSize+uintptr(len(handle))) + copy(buf[hdrSize:], handle) + fh := (*fileHandle)(unsafe.Pointer(&buf[0])) + fh.Type = handleType + fh.Bytes = uint32(len(handle)) + return FileHandle{fh} +} + +func (fh *FileHandle) Size() int { return int(fh.fileHandle.Bytes) } +func (fh *FileHandle) Type() int32 { return fh.fileHandle.Type } +func (fh *FileHandle) Bytes() []byte { + n := fh.Size() + if n == 0 { + return nil + } + return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n] +} + +// NameToHandleAt wraps the name_to_handle_at system call; it obtains +// a handle for a path name. +func NameToHandleAt(dirfd int, path string, flags int) (handle FileHandle, mountID int, err error) { + var mid _C_int + // Try first with a small buffer, assuming the handle will + // only be 32 bytes. + size := uint32(32 + unsafe.Sizeof(fileHandle{})) + didResize := false + for { + buf := make([]byte, size) + fh := (*fileHandle)(unsafe.Pointer(&buf[0])) + fh.Bytes = size - uint32(unsafe.Sizeof(fileHandle{})) + err = nameToHandleAt(dirfd, path, fh, &mid, flags) + if err == EOVERFLOW { + if didResize { + // We shouldn't need to resize more than once + return + } + didResize = true + size = fh.Bytes + uint32(unsafe.Sizeof(fileHandle{})) + continue + } + if err != nil { + return + } + return FileHandle{fh}, int(mid), nil + } +} + +// OpenByHandleAt wraps the open_by_handle_at system call; it opens a +// file via a handle as previously returned by NameToHandleAt. +func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err error) { + return openByHandleAt(mountFD, handle.fileHandle, flags) +} + /* * Unimplemented */ @@ -1615,8 +1768,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // Alarm // ArchPrctl // Brk -// Capget -// Capset // ClockNanosleep // ClockSettime // Clone diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index cda3559419c..f626794439b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -19,12 +19,18 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } +//sysnb pipe(p *[2]_C_int) (err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int + // Try pipe2 first for Android O, then try pipe for kernel 2.6.23. err = pipe2(&pp, 0) + if err == ENOSYS { + err = pipe(&pp) + } p[0] = int(pp[0]) p[1] = int(pp[1]) return @@ -266,3 +272,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error { // order of their arguments. 
return armSyncFileRange(fd, flags, off, n) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6d567224017..cb20b15d5d2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -208,3 +208,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return ppoll(&fds[0], len(fds), ts, nil) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index f23ca451c7d..6230f640520 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -211,3 +211,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. 
+ cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5240e16e4b3..5ef30904012 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -94,6 +94,18 @@ func nametomib(name string) (mib []_C_int, err error) { return mib, nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func SysctlClockinfo(name string) (*Clockinfo, error) { mib, err := sysctlmib(name) if err != nil { @@ -120,9 +132,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>32 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO + } + return } const ImplementsGetwd = true diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 687999549c8..1a074b2fe10 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -43,6 +43,35 @@ func nametomib(name string) (mib []_C_int, err error) { return nil, EINVAL } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + +func SysctlClockinfo(name string) (*Clockinfo, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofClockinfo) + var ci Clockinfo + if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofClockinfo { + return nil, EIO + } + return &ci, nil +} + func SysctlUvmexp(name string) (*Uvmexp, error) { mib, err := sysctlmib(name) if err != nil { @@ -72,9 +101,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + 
return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>32 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO was allowed by getdirentries. + err = EIO + } + return } const ImplementsGetwd = true diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go new file mode 100644 index 00000000000..0fb39cf5eb1 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/amd64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go index e4780127537..0153a316dd9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -35,6 +35,22 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} + //sysnb pipe(p *[2]_C_int) (n int, err error) func Pipe(p []int) (err error) { @@ -189,6 +205,7 @@ func Setgroups(gids []int) (err error) { return setgroups(len(a), &a[0]) } +// ReadDirent reads directory entries from fd and writes them into buf. func ReadDirent(fd int, buf []byte) (n int, err error) { // Final argument is (basep *uintptr) and the syscall doesn't take nil. // TODO(rsc): Can we use a single global basep for all calls? diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go index 33583a22b67..3de37566c6f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -28,6 +28,11 @@ var ( errENOENT error = syscall.ENOENT ) +var ( + signalNameMapOnce sync.Once + signalNameMap map[string]syscall.Signal +) + // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. 
func errnoErr(e syscall.Errno) error { @@ -66,6 +71,19 @@ func SignalName(s syscall.Signal) string { return "" } +// SignalNum returns the syscall.Signal for signal named s, +// or 0 if a signal with such name is not found. +// The signal name should start with "SIG". +func SignalNum(s string) syscall.Signal { + signalNameMapOnce.Do(func() { + signalNameMap = make(map[string]syscall.Signal) + for _, signal := range signalList { + signalNameMap[signal.name] = signal.num + } + }) + return signalNameMap[s] +} + // clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. func clen(n []byte) int { i := bytes.IndexByte(n, 0) @@ -276,6 +294,13 @@ func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { return &tv, err } +func GetsockoptUint64(fd, level, opt int) (value uint64, err error) { + var n uint64 + vallen := _Socklen(8) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -326,13 +351,21 @@ func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { } func SetsockoptString(fd, level, opt int, s string) (err error) { - return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s))) + var p unsafe.Pointer + if len(s) > 0 { + p = unsafe.Pointer(&[]byte(s)[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(s))) } func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) } +func SetsockoptUint64(fd, level, opt int, value uint64) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 8) +} + func Socket(domain, typ, proto int) (fd int, err error) { if domain == AF_INET6 && SocketDisableIPv6 { return -1, EAFNOSUPPORT @@ -377,3 +410,22 @@ func SetNonblock(fd int, nonblocking bool) (err error) { func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } + +// Lutimes sets the access and modification times tv on path. If path refers to +// a symlink, it is not dereferenced and the timestamps are set on the symlink. +// If tv is nil, the access and modification times are set to the current time. +// Otherwise tv must contain exactly 2 elements, with access time as the first +// element and modification time as the second element. 
+func Lutimes(path string, tv []Timeval) error { + if tv == nil { + return UtimesNanoAt(AT_FDCWD, path, nil, AT_SYMLINK_NOFOLLOW) + } + if len(tv) != 2 { + return EINVAL + } + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_aix.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_aix.go index 25e834940d7..40d2beede55 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_aix.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_aix.go @@ -87,8 +87,6 @@ type Mode_t C.mode_t type Timespec C.struct_timespec -type StTimespec C.struct_st_timespec - type Timeval C.struct_timeval type Timeval32 C.struct_timeval32 @@ -133,6 +131,8 @@ type RawSockaddrInet6 C.struct_sockaddr_in6 type RawSockaddrUnix C.struct_sockaddr_un +type RawSockaddrDatalink C.struct_sockaddr_dl + type RawSockaddr C.struct_sockaddr type RawSockaddrAny C.struct_sockaddr_any @@ -156,17 +156,18 @@ type Linger C.struct_linger type Msghdr C.struct_msghdr const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) // Routing and interface messages diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_darwin.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_darwin.go index 9fd2aaa6a22..155c2e692b4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_darwin.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_darwin.go @@ -275,3 +275,9 @@ const ( // uname type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_freebsd.go index 74707989512..a121dc3368f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -243,11 +243,55 @@ const ( // Ptrace requests const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL + PTRACE_ATTACH = C.PT_ATTACH + PTRACE_CONT = C.PT_CONTINUE + PTRACE_DETACH = C.PT_DETACH + PTRACE_GETFPREGS = C.PT_GETFPREGS + PTRACE_GETFSBASE = C.PT_GETFSBASE + PTRACE_GETLWPLIST = C.PT_GETLWPLIST + PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS + PTRACE_GETREGS = C.PT_GETREGS + PTRACE_GETXSTATE = C.PT_GETXSTATE + PTRACE_IO 
= C.PT_IO + PTRACE_KILL = C.PT_KILL + PTRACE_LWPEVENTS = C.PT_LWP_EVENTS + PTRACE_LWPINFO = C.PT_LWPINFO + PTRACE_SETFPREGS = C.PT_SETFPREGS + PTRACE_SETREGS = C.PT_SETREGS + PTRACE_SINGLESTEP = C.PT_STEP + PTRACE_TRACEME = C.PT_TRACE_ME ) +const ( + PIOD_READ_D = C.PIOD_READ_D + PIOD_WRITE_D = C.PIOD_WRITE_D + PIOD_READ_I = C.PIOD_READ_I + PIOD_WRITE_I = C.PIOD_WRITE_I +) + +const ( + PL_FLAG_BORN = C.PL_FLAG_BORN + PL_FLAG_EXITED = C.PL_FLAG_EXITED + PL_FLAG_SI = C.PL_FLAG_SI +) + +const ( + TRAP_BRKPT = C.TRAP_BRKPT + TRAP_TRACE = C.TRAP_TRACE +) + +type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo + +type __Siginfo C.struct___siginfo + +type Sigset_t C.sigset_t + +type Reg C.struct_reg + +type FpReg C.struct_fpreg + +type PtraceIoDesc C.struct_ptrace_io_desc + // Events (kqueue, kevent) type Kevent_t C.struct_kevent_freebsd11 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_netbsd.go index 2dd4f9542c9..4a96d72c37d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -254,6 +254,7 @@ type Ptmget C.struct_ptmget const ( AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_openbsd.go index 4e5e57f9a61..775cb57dc8a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -241,6 +241,7 @@ type Winsize C.struct_winsize const ( AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) @@ -274,3 +275,9 @@ type Utsname C.struct_utsname const SizeofUvmexp = C.sizeof_struct_uvmexp type Uvmexp C.struct_uvmexp + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/openbsd_unveil.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/unveil_openbsd.go similarity index 98% rename from cluster-autoscaler/vendor/golang.org/x/sys/unix/openbsd_unveil.go rename to cluster-autoscaler/vendor/golang.org/x/sys/unix/unveil_openbsd.go index aebc2dc5768..168d5ae7791 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/openbsd_unveil.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build openbsd - package unix import ( diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 4b7b965027d..1def8a58126 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -926,6 +926,8 @@ const ( TCSETSF = 0x5404 TCSETSW = 0x5403 TCXONC = 0x540b + TIMER_ABSTIME = 0x3e7 + TIMER_MAX = 0x20 TIOC = 0x5400 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index ed04fd1b77d..03187dea98f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -3,7 +3,7 @@ // +build ppc64,aix -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go package unix @@ -926,6 +926,8 @@ const ( TCSETSF = 0x5404 TCSETSW = 0x5403 TCXONC = 0x540b + TIMER_ABSTIME = 0x3e7 + TIMER_MAX = 0x20 TIOC = 0x5400 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index cb89df8f543..3fb475bcc02 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 
0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,6 +614,60 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + 
FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 @@ -493,6 +675,7 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -514,7 +697,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -539,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -804,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -966,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1006,6 +1205,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1134,7 +1342,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1398,6 +1606,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1435,6 +1649,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1812,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1905,6 +2124,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 
SO_BSDCOMPAT = 0xe @@ -1953,6 +2173,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -1964,9 +2186,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2008,6 +2238,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2059,6 +2290,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2225,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 @@ -2232,6 +2466,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2259,8 +2494,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2409,6 +2646,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 73c9b88ca78..9c4e19f9a3b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + 
BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,6 +614,60 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + 
FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 @@ -493,6 +675,7 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -514,7 +697,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -539,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -804,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -966,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1006,6 +1205,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1134,7 +1342,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1398,6 +1606,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + 
PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1435,6 +1649,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1813,6 +2028,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1906,6 +2125,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1954,6 +2174,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -1965,9 +2187,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2009,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2060,6 +2291,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2226,6 +2459,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 @@ -2233,6 +2467,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2260,8 +2495,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2409,6 +2646,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index f1ef82f57ed..a1f038c06d0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 
BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD 
= 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 
0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1819,6 +2034,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1912,6 +2131,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1960,6 +2180,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -1971,9 +2193,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2015,6 +2245,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2066,6 +2297,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2232,6 +2465,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 @@ -2239,6 +2473,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2266,8 +2501,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2415,6 +2652,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git 
a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index cf17c990694..504ce138983 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + 
CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -414,6 +541,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -488,6 +616,60 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 @@ -495,6 +677,7 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -516,7 +699,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -541,6 +724,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 
0x2 @@ -806,6 +990,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -968,6 +1153,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1007,6 +1206,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1135,7 +1343,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1399,6 +1607,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1436,6 +1650,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1803,6 +2018,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1896,6 +2115,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1944,6 +2164,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -1955,9 +2177,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2000,6 +2230,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2051,6 +2282,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2217,6 +2450,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 @@ 
-2224,6 +2458,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2251,8 +2486,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2400,6 +2637,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 380913c4fce..58b642904f0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 
0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID 
= 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1812,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -1905,6 +2124,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -1953,6 +2173,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -1964,10 +2186,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 
0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2009,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2059,6 +2290,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2227,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 @@ -2234,6 +2468,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2261,8 +2496,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2411,6 +2648,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index fb82529ac93..35e33de6021 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 
+ BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + 
FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1812,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 
SIOCOUTQNSD = 0x894b @@ -1905,6 +2124,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -1953,6 +2173,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -1964,10 +2186,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2009,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2059,6 +2290,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2227,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 @@ -2234,6 +2468,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2261,8 +2496,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2411,6 +2648,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 677d9045624..574fcd8c508 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + 
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 
0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ 
-1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1812,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -1905,6 +2124,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -1953,6 +2173,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -1964,10 +2186,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2009,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2059,6 +2290,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2227,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 @@ -2234,6 +2468,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2261,8 +2496,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2411,6 +2648,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7ddd09d7824..cdf0cf5f4c3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d 
BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 
0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 
0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1812,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -1905,6 +2124,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -1953,6 +2173,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -1964,10 +2186,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2009,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2059,6 +2290,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2227,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 @@ -2234,6 +2468,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2261,8 +2496,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 
0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2411,6 +2648,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index ebaca417b46..eefdb32864c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 
0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 
0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1003,6 +1202,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1131,7 +1339,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1398,6 +1606,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1435,6 +1649,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1870,6 +2085,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -1963,6 +2182,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2011,6 +2231,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2022,9 +2244,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2066,6 +2296,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2115,6 +2346,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a 
TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2287,6 +2520,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 @@ -2294,6 +2528,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2321,8 +2556,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2470,6 +2707,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 02938cb6ed4..78db210411c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + 
BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + 
FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1003,6 +1202,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1131,7 +1339,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1398,6 +1606,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1435,6 +1649,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1870,6 +2085,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -1963,6 +2182,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2011,6 +2231,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2022,9 +2244,17 @@ const ( SO_SNDBUFFORCE = 0x20 
SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2066,6 +2296,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2115,6 +2346,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2287,6 +2520,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 @@ -2294,6 +2528,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2321,8 +2556,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2470,6 +2707,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 5aea4b9093a..0cd07f9336d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + 
BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 
0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH 
= 0x1 @@ -1800,6 +2015,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1893,6 +2112,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1941,6 +2161,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -1952,9 +2174,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -1996,6 +2226,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2047,6 +2278,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2213,6 +2446,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 @@ -2220,6 +2454,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2247,8 +2482,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2396,6 +2633,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 7f7c2e3e2fe..ac4f1d9f7fe 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -174,6 +174,7 @@ const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 @@ -195,11 +196,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + 
BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -207,8 +267,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -222,20 +290,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -263,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -301,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -319,6 +442,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 
0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -413,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -486,12 +614,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -513,7 +696,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -538,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -803,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -965,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1004,6 +1203,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1132,7 +1340,7 @@ 
const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1396,6 +1604,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1433,6 +1647,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1873,6 +2088,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -1966,6 +2185,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2014,6 +2234,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2025,9 +2247,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2069,6 +2299,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2120,6 +2351,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2286,6 +2519,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 @@ -2293,6 +2527,7 @@ const ( TUNGETVNETBE = 0x800454df TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 TUNSETDEBUG = 0x400454c9 TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce @@ -2320,8 +2555,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2469,6 +2706,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 968e21fd680..8a12f1412d6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -177,6 +177,7 @@ 
const ( B9600 = 0xd BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 @@ -198,11 +199,70 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -210,8 +270,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -225,20 +293,35 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -266,6 +349,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 
0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -304,6 +426,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -322,6 +445,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -417,6 +544,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -490,12 +618,67 @@ const ( FALLOC_FL_PUNCH_HOLE = 0x2 FALLOC_FL_UNSHARE_RANGE = 0x40 FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -517,7 +700,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x3 + FS_POLICY_FLAGS_VALID = 0x7 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -542,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x1 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -807,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -969,6 +1154,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 
0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1008,6 +1207,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1136,7 +1344,7 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x3 + NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 @@ -1400,6 +1608,12 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -1437,6 +1651,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1865,6 +2080,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -1958,6 +2177,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x35 SO_ATTACH_REUSEPORT_EBPF = 0x36 SO_BINDTODEVICE = 0xd + SO_BINDTOIFINDEX = 0x41 SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 @@ -2006,6 +2226,8 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVTIMEO = 0x2000 + SO_RCVTIMEO_NEW = 0x44 + SO_RCVTIMEO_OLD = 0x2000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x24 @@ -2017,9 +2239,17 @@ const ( SO_SNDBUFFORCE = 0x100a SO_SNDLOWAT = 0x1000 SO_SNDTIMEO = 0x4000 + SO_SNDTIMEO_NEW = 0x45 + SO_SNDTIMEO_OLD = 0x4000 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x23 + SO_TIMESTAMPING_NEW = 0x43 + SO_TIMESTAMPING_OLD = 0x23 SO_TIMESTAMPNS = 0x21 + SO_TIMESTAMPNS_NEW = 0x42 + SO_TIMESTAMPNS_OLD = 0x21 + SO_TIMESTAMP_NEW = 0x46 + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2061,6 +2291,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2111,6 +2342,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2275,6 +2508,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 @@ -2282,6 +2516,7 @@ const ( TUNGETVNETBE = 0x400454df TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 TUNSETDEBUG = 0x800454c9 TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce @@ -2309,8 +2544,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 
0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2458,6 +2695,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go new file mode 100644 index 00000000000..ec5f92de888 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -0,0 +1,1789 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + 
BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + 
ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + 
ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + 
IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + 
IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + 
LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + 
RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0186941 + SIOCBRDGGFD = 0xc0186952 + SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0186946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80186940 + SIOCBRDGSFD = 0x80186952 + SIOCBRDGSHT = 0x80186951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80186953 + SIOCBRDGSPRI = 0x80186950 + SIOCBRDGSPROTO = 0x8018695a + SIOCBRDGSTO = 0x80186945 + SIOCBRDGSTXHC = 0x80186959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + 
SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SIOCSWGDPID = 0xc018695b + SIOCSWGMAXFLOW = 0xc0186960 + SIOCSWGMAXGROUP = 0xc018695d + SIOCSWSDPID = 0x8018695c + SIOCSWSPORTNO = 0xc060695f + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR 
= 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = 
syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name 
string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, 
"ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 79f6e0566e3..ed657ff1bc0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t); int pause(); int pread64(int, uintptr_t, size_t, long long); int pwrite64(int, uintptr_t, size_t, long long); +#define c_select select +int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int setregid(int, int); int setreuid(int, int); @@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t); int getsockname(int, uintptr_t, uintptr_t); int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int recvmsg(int, uintptr_t, int); -int sendmsg(int, uintptr_t, int); +int nrecvmsg(int, uintptr_t, int); +int nsendmsg(int, uintptr_t, int); int munmap(uintptr_t, uintptr_t); int madvise(uintptr_t, size_t, int); int mprotect(uintptr_t, size_t, int); @@ -118,6 +120,8 @@ int poll(uintptr_t, int, int); int gettimeofday(uintptr_t, uintptr_t); int time(uintptr_t); int utime(uintptr_t, uintptr_t); +unsigned long long getsystemcfg(int); +int umount(uintptr_t); int getrlimit64(int, uintptr_t); int setrlimit64(int, 
uintptr_t); long long lseek64(int, long long, int); @@ -855,7 +859,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *Stat_t) (err error) { r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { err = er @@ -865,7 +869,7 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) if r0 == -1 && er != nil { @@ -949,7 +953,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { @@ -1004,6 +1008,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, er := C.c_select(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, er := C.pselect(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask)))) n = int(r0) @@ -1056,9 +1071,9 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, statptr *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(statptr)))) if r0 == -1 && er != nil { err = er } @@ -1225,7 +1240,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, er := C.nrecvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) n = int(r0) if r0 == -1 && er != nil { err = er @@ -1236,7 +1251,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, er := C.nsendmsg(C.int(s), 
C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) n = int(r0) if r0 == -1 && er != nil { err = er @@ -1409,6 +1424,25 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsystemcfg(label int) (n uint64) { + r0, _ := C.getsystemcfg(C.int(label)) + n = uint64(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func umount(target string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(target))) + r0, er := C.umount(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(resource int, rlim *Rlimit) (err error) { r0, er := C.getrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) if r0 == -1 && er != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index e645a05cbeb..664b293b431 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -803,7 +803,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *Stat_t) (err error) { _, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat))) if e1 != 0 { err = errnoErr(e1) @@ -813,7 +813,7 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -905,7 +905,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -960,6 +960,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, e1 := callselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, e1 := callpselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) @@ -1012,13 +1023,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, statptr *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statptr))) if e1 != 0 { err = errnoErr(e1) } @@ -1189,7 
+1200,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, e1 := callrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags) + r0, e1 := callnrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1200,7 +1211,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, e1 := callsendmsg(s, uintptr(unsafe.Pointer(msg)), flags) + r0, e1 := callnsendmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1367,6 +1378,29 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func umount(target string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, e1 := callumount(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(resource int, rlim *Rlimit) (err error) { _, e1 := callgetrlimit(resource, uintptr(unsafe.Pointer(rlim))) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 0b8eb721022..4b3a8ad7bec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -85,6 +85,7 @@ import ( //go:cgo_import_dynamic libc_pause pause "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pread64 pread64 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pwrite64 pwrite64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_select select "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pselect pselect "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setregid setregid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setreuid setreuid "libc.a/shr_64.o" @@ -105,8 +106,8 @@ import ( //go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o" //go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o" //go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_nrecvmsg nrecvmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_nsendmsg nsendmsg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o" //go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mprotect mprotect "libc.a/shr_64.o" @@ -120,6 +121,8 @@ import ( //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.a/shr_64.o" //go:cgo_import_dynamic libc_time time "libc.a/shr_64.o" //go:cgo_import_dynamic libc_utime utime "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic 
libc_lseek lseek "libc.a/shr_64.o" @@ -200,6 +203,7 @@ import ( //go:linkname libc_pause libc_pause //go:linkname libc_pread64 libc_pread64 //go:linkname libc_pwrite64 libc_pwrite64 +//go:linkname libc_select libc_select //go:linkname libc_pselect libc_pselect //go:linkname libc_setregid libc_setregid //go:linkname libc_setreuid libc_setreuid @@ -220,8 +224,8 @@ import ( //go:linkname libc_getsockname libc_getsockname //go:linkname libc_recvfrom libc_recvfrom //go:linkname libc_sendto libc_sendto -//go:linkname libc_recvmsg libc_recvmsg -//go:linkname libc_sendmsg libc_sendmsg +//go:linkname libc_nrecvmsg libc_nrecvmsg +//go:linkname libc_nsendmsg libc_nsendmsg //go:linkname libc_munmap libc_munmap //go:linkname libc_madvise libc_madvise //go:linkname libc_mprotect libc_mprotect @@ -235,6 +239,8 @@ import ( //go:linkname libc_gettimeofday libc_gettimeofday //go:linkname libc_time libc_time //go:linkname libc_utime libc_utime +//go:linkname libc_getsystemcfg libc_getsystemcfg +//go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit //go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek @@ -318,6 +324,7 @@ var ( libc_pause, libc_pread64, libc_pwrite64, + libc_select, libc_pselect, libc_setregid, libc_setreuid, @@ -338,8 +345,8 @@ var ( libc_getsockname, libc_recvfrom, libc_sendto, - libc_recvmsg, - libc_sendmsg, + libc_nrecvmsg, + libc_nsendmsg, libc_munmap, libc_madvise, libc_mprotect, @@ -353,6 +360,8 @@ var ( libc_gettimeofday, libc_time, libc_utime, + libc_getsystemcfg, + libc_umount, libc_getrlimit, libc_setrlimit, libc_lseek, @@ -890,6 +899,13 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_select)), 5, uintptr(nfd), r, w, e, timeout, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pselect)), 6, uintptr(nfd), r, w, e, timeout, sigmask) return @@ -925,8 +941,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0) +func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, statptr, 0, 0, 0, 0) return } @@ -1030,15 +1046,15 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) +func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nrecvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), msg, 
uintptr(flags), 0, 0, 0) +func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nsendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) return } @@ -1135,6 +1151,20 @@ func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_umount)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) return diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index e88a442787d..cde4dbc5f54 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t); int pause(); int pread64(int, uintptr_t, size_t, long long); int pwrite64(int, uintptr_t, size_t, long long); +#define c_select select +int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int setregid(int, int); int setreuid(int, int); @@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t); int getsockname(int, uintptr_t, uintptr_t); int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int recvmsg(int, uintptr_t, int); -int sendmsg(int, uintptr_t, int); +int nrecvmsg(int, uintptr_t, int); +int nsendmsg(int, uintptr_t, int); int munmap(uintptr_t, uintptr_t); int madvise(uintptr_t, size_t, int); int mprotect(uintptr_t, size_t, int); @@ -118,6 +120,8 @@ int poll(uintptr_t, int, int); int gettimeofday(uintptr_t, uintptr_t); int time(uintptr_t); int utime(uintptr_t, uintptr_t); +unsigned long long getsystemcfg(int); +int umount(uintptr_t); int getrlimit(int, uintptr_t); int setrlimit(int, uintptr_t); long long lseek(int, long long, int); @@ -731,6 +735,14 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.c_select(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.pselect(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout), C.uintptr_t(sigmask))) e1 = syscall.GetErrno() @@ -771,8 +783,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { - r1 = 
uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat))) +func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(statptr))) e1 = syscall.GetErrno() return } @@ -891,16 +903,16 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.recvmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) +func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.nrecvmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) e1 = syscall.GetErrno() return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.sendmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) +func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.nsendmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) e1 = syscall.GetErrno() return } @@ -1011,6 +1023,22 @@ func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getsystemcfg(C.int(label))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.umount(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getrlimit(C.int(resource), C.uintptr_t(rlim))) e1 = syscall.GetErrno() diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c4ec7ff87cf..dd5ea36ee97 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -377,16 +377,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1691,6 +1681,16 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git 
a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 23346dc68ff..78ca923396b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2341,6 +2326,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 37b85b4f612..f40465ca89e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c142e33e92e..64df03c4520 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
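(Editor's note on the hunks around this point: they relocate the darwin ptrace wrapper and its libc_ptrace trampoline to the end of each generated zsyscall_darwin_*.go/.s file; the wrapper's behaviour is unchanged and it still dispatches through /usr/lib/libSystem.B.dylib. A minimal usage sketch follows, assuming the exported PtraceAttach/PtraceDetach and Wait4 helpers in golang.org/x/sys/unix funnel into this generated ptrace call; the pid value is purely illustrative, not taken from this patch.)

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pid := 12345 // hypothetical target process ID

	// PtraceAttach/PtraceDetach are assumed exported helpers that wrap the
	// generated ptrace function relocated by the hunk above.
	if err := unix.PtraceAttach(pid); err != nil {
		fmt.Println("attach failed:", err)
		return
	}

	// The target stops on attach; collect the stop notification before detaching.
	var ws unix.WaitStatus
	if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
		fmt.Println("wait failed:", err)
	}

	if err := unix.PtraceDetach(pid); err != nil {
		fmt.Println("detach failed:", err)
	}
}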
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2356,6 +2341,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a3915197d4..debcb8ed3a8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -266,6 +264,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 01cffbf46cc..ed330623997 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 994056f3596..66af9f480e5 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 8f2691deea9..5258a73282a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 61dc0d4c129..f57f48f826d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index ae9f1a21e62..cdfe9318ba0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -749,6 +749,23 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 
= unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 80903e47b65..a783306b2ad 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index cd250ff0e24..f995520d38d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 290a9c2cb06..d681acd4300 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := 
Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index c6df9d2e8f6..5049b2ede45 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -404,6 +404,16 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c8b451000b0..c5e46e4cf66 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), 
uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2aac3184bc0..da8819e480d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 13c06c28158..6ad9be6dd48 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err 
error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,42 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -2309,3 +2390,18 @@ func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 
737fa8d1812..f88331782b3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ 
-2206,3 +2277,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 0a85f3f8db3..8eebc6c77c0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ec7007e781d..ecf62a677d8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname 
string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index c5bb25d9643..1ba0f7b6f4e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ 
func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 26ada0478f9..20012b2f0ec 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := 
Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2da9cb700a7..2b520deaa2f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, 
uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 772733d83ff..d9f044c9534 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags 
int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 996eba517ac..9feed65eb0b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -2186,3 +2257,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index cb9072a33a4..0a651508814 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) 
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5e48a1001b9..e27f66930c0 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -14,6 +14,27 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fchmodat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1360,8 +1401,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1658,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 642db7670a2..7e05826647e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 59585fee354..d94d076aa01 100644 --- 
a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 6ec31434b21..cf5bf3d0546 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 603d1443349..243a9317cf2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 6a489fac0a6..a9532d07870 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 30cba4347c1..0cb9f01774b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index fa1beda33e3..6fc99b54947 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go new file mode 100644 index 00000000000..27878a72b83 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -0,0 +1,1692 @@ +// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build openbsd,arm64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), 
uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) 
+ } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() 
(err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat 
*Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, 
uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 97b22a499ed..5f614760c6c 100644 --- 
a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1,4 +1,4 @@ -// mksyscall_solaris.pl -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go +// go run mksyscall_solaris.go -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build solaris,amd64 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index b005031abed..37dcc74c2de 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,6 +1,8 @@ // mksysctl_openbsd.pl // Code generated by the command above; DO NOT EDIT. +// +build 386,openbsd + package unix type mibentry struct { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d014451c9d8..fe6caa6eb7f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,4 +1,4 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. // +build amd64,openbsd diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index b005031abed..6eb8c0b086a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,6 +1,8 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +// +build arm,openbsd + package unix type mibentry struct { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go new file mode 100644 index 00000000000..ba4304fd233 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -0,0 +1,275 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. 
+ +// +build arm64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + 
{"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", 
[]_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + 
{"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 55c3a32945d..9474974b657 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build 386,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index b39be6cb8f4..48a7beae7bb 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build amd64,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 44ffd4ce5e9..4a6dfd4a745 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 9f21e9550ed..3e51af8edd2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm64,freebsd @@ -7,13 +7,13 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } SYS_LINK = 9 // { int link(char *path, char *link); } SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } @@ -21,20 +21,20 @@ const ( SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } SYS_SETUID = 23 // { int setuid(uid_t uid); } SYS_GETUID = 24 // { uid_t getuid(void); } SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } SYS_ACCESS = 33 // { int access(char *path, int amode); } SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } @@ -42,56 +42,57 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int 
pid); } SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } SYS_REBOOT = 55 // { int reboot(int opt); } SYS_REVOKE = 56 // { int revoke(char *path); } SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } SYS_GETPGRP = 81 // { int getpgrp(void); } SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } 
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } @@ -99,24 +100,24 @@ const ( SYS_RENAME = 128 // { int rename(char *from, char *to); } SYS_FLOCK = 131 // { int flock(int fd, int how); } SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ - SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ - SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + 
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -127,269 +128,269 @@ const ( SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } SYS_UNDELETE = 205 // { int undelete(char *path); } SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ - SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } - SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ - SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } - SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + 
SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int 
kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ - SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } SYS_SCHED_YIELD = 331 // { int sched_yield (void); } SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int 
__acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } SYS___SETUGID = 374 // { int __setugid(int flag); } SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } SYS___MAC_SET_PROC = 385 // { int 
__mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } - SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ - SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int 
attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } SYS_GETAUID = 447 // { int 
getauid(uid_t *auid); } SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } - SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ - SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t 
pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int 
symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ - SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ - SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } SYS_CAP_ENTER = 516 // { int cap_enter(void); } SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // 
{ int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, \ - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, \ + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 8d17873de0f..e869c060318 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -6,387 +6,427 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 
34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86OLD = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_VM86 = 166 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 
206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 209 - SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_SET_THREAD_AREA = 243 - SYS_GET_THREAD_AREA = 244 - SYS_IO_SETUP = 245 - SYS_IO_DESTROY = 246 - SYS_IO_GETEVENTS = 247 - SYS_IO_SUBMIT = 248 - SYS_IO_CANCEL = 249 - SYS_FADVISE64 = 250 - SYS_EXIT_GROUP = 252 - SYS_LOOKUP_DCOOKIE = 253 - SYS_EPOLL_CREATE = 254 - SYS_EPOLL_CTL = 255 - SYS_EPOLL_WAIT = 256 - SYS_REMAP_FILE_PAGES = 257 - SYS_SET_TID_ADDRESS = 258 - SYS_TIMER_CREATE = 259 - SYS_TIMER_SETTIME = 260 - SYS_TIMER_GETTIME = 261 - SYS_TIMER_GETOVERRUN = 262 - SYS_TIMER_DELETE = 263 - SYS_CLOCK_SETTIME = 264 - SYS_CLOCK_GETTIME = 265 - SYS_CLOCK_GETRES = 266 - SYS_CLOCK_NANOSLEEP = 267 - SYS_STATFS64 = 268 - SYS_FSTATFS64 = 269 - SYS_TGKILL = 270 - SYS_UTIMES = 271 - SYS_FADVISE64_64 = 272 - SYS_VSERVER = 273 - SYS_MBIND = 274 - SYS_GET_MEMPOLICY = 275 - SYS_SET_MEMPOLICY = 276 - SYS_MQ_OPEN = 277 - SYS_MQ_UNLINK = 278 - SYS_MQ_TIMEDSEND = 279 - SYS_MQ_TIMEDRECEIVE = 280 - SYS_MQ_NOTIFY = 281 - SYS_MQ_GETSETATTR = 282 - SYS_KEXEC_LOAD = 283 - SYS_WAITID = 284 - SYS_ADD_KEY = 286 - SYS_REQUEST_KEY = 287 - SYS_KEYCTL = 288 - SYS_IOPRIO_SET = 289 - SYS_IOPRIO_GET = 290 - SYS_INOTIFY_INIT = 291 - SYS_INOTIFY_ADD_WATCH = 292 - SYS_INOTIFY_RM_WATCH = 293 - SYS_MIGRATE_PAGES = 294 - SYS_OPENAT = 295 - SYS_MKDIRAT = 296 - SYS_MKNODAT = 297 - SYS_FCHOWNAT = 298 - SYS_FUTIMESAT = 299 - SYS_FSTATAT64 = 300 - SYS_UNLINKAT = 301 - SYS_RENAMEAT = 302 - SYS_LINKAT = 303 - SYS_SYMLINKAT = 304 - SYS_READLINKAT = 305 - SYS_FCHMODAT = 306 - SYS_FACCESSAT = 307 - SYS_PSELECT6 = 308 - SYS_PPOLL = 309 - SYS_UNSHARE = 310 - SYS_SET_ROBUST_LIST = 311 - SYS_GET_ROBUST_LIST = 312 - SYS_SPLICE = 313 - SYS_SYNC_FILE_RANGE = 314 - SYS_TEE = 315 - SYS_VMSPLICE = 316 - SYS_MOVE_PAGES = 317 - SYS_GETCPU = 318 - SYS_EPOLL_PWAIT = 319 - SYS_UTIMENSAT = 320 - SYS_SIGNALFD = 321 - SYS_TIMERFD_CREATE = 322 - SYS_EVENTFD = 323 - SYS_FALLOCATE = 324 - SYS_TIMERFD_SETTIME = 325 - SYS_TIMERFD_GETTIME = 326 - SYS_SIGNALFD4 = 327 - SYS_EVENTFD2 = 328 - SYS_EPOLL_CREATE1 = 329 - SYS_DUP3 = 330 - SYS_PIPE2 = 331 - SYS_INOTIFY_INIT1 = 332 - SYS_PREADV = 333 - SYS_PWRITEV = 334 - SYS_RT_TGSIGQUEUEINFO = 335 - SYS_PERF_EVENT_OPEN = 336 - SYS_RECVMMSG = 337 - SYS_FANOTIFY_INIT = 338 - SYS_FANOTIFY_MARK = 339 - SYS_PRLIMIT64 = 340 - SYS_NAME_TO_HANDLE_AT = 341 - SYS_OPEN_BY_HANDLE_AT = 342 - SYS_CLOCK_ADJTIME = 343 - SYS_SYNCFS = 344 - SYS_SENDMMSG = 345 - SYS_SETNS = 346 - SYS_PROCESS_VM_READV = 347 - SYS_PROCESS_VM_WRITEV = 348 - SYS_KCMP = 349 - SYS_FINIT_MODULE = 350 - SYS_SCHED_SETATTR = 351 - SYS_SCHED_GETATTR = 352 - SYS_RENAMEAT2 = 353 - SYS_SECCOMP = 354 - SYS_GETRANDOM = 355 - SYS_MEMFD_CREATE = 356 - SYS_BPF = 357 - SYS_EXECVEAT = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - 
SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME = 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_USERFAULTFD = 374 - SYS_MEMBARRIER = 375 - SYS_MLOCK2 = 376 - SYS_COPY_FILE_RANGE = 377 - SYS_PREADV2 = 378 - SYS_PWRITEV2 = 379 - SYS_PKEY_MPROTECT = 380 - SYS_PKEY_ALLOC = 381 - SYS_PKEY_FREE = 382 - SYS_STATX = 383 - SYS_ARCH_PRCTL = 384 - SYS_IO_PGETEVENTS = 385 - SYS_RSEQ = 386 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86OLD = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 
+ SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_VM86 = 166 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_SET_THREAD_AREA = 243 + SYS_GET_THREAD_AREA = 244 + SYS_IO_SETUP = 245 + SYS_IO_DESTROY = 246 + SYS_IO_GETEVENTS = 247 + SYS_IO_SUBMIT = 248 + SYS_IO_CANCEL = 249 + SYS_FADVISE64 = 250 + SYS_EXIT_GROUP = 252 + SYS_LOOKUP_DCOOKIE = 253 + SYS_EPOLL_CREATE = 254 + SYS_EPOLL_CTL = 255 + SYS_EPOLL_WAIT = 256 + SYS_REMAP_FILE_PAGES = 257 + SYS_SET_TID_ADDRESS = 258 + SYS_TIMER_CREATE = 259 + SYS_TIMER_SETTIME = 260 + SYS_TIMER_GETTIME = 261 + SYS_TIMER_GETOVERRUN = 262 + SYS_TIMER_DELETE = 263 + SYS_CLOCK_SETTIME = 264 + SYS_CLOCK_GETTIME = 265 + SYS_CLOCK_GETRES = 266 + SYS_CLOCK_NANOSLEEP = 267 + SYS_STATFS64 = 268 + SYS_FSTATFS64 = 269 + SYS_TGKILL = 270 + SYS_UTIMES = 271 + SYS_FADVISE64_64 = 272 + SYS_VSERVER = 273 + SYS_MBIND = 274 + SYS_GET_MEMPOLICY = 275 + SYS_SET_MEMPOLICY = 276 + SYS_MQ_OPEN = 277 + SYS_MQ_UNLINK = 278 + SYS_MQ_TIMEDSEND = 279 + SYS_MQ_TIMEDRECEIVE = 280 + SYS_MQ_NOTIFY = 281 + SYS_MQ_GETSETATTR = 282 + SYS_KEXEC_LOAD = 283 + SYS_WAITID = 284 + SYS_ADD_KEY = 286 + SYS_REQUEST_KEY = 287 + SYS_KEYCTL = 288 + SYS_IOPRIO_SET = 289 + SYS_IOPRIO_GET = 290 + SYS_INOTIFY_INIT = 291 + SYS_INOTIFY_ADD_WATCH = 292 + SYS_INOTIFY_RM_WATCH = 293 + SYS_MIGRATE_PAGES = 294 + SYS_OPENAT = 295 + SYS_MKDIRAT = 296 + SYS_MKNODAT = 297 + SYS_FCHOWNAT = 298 + SYS_FUTIMESAT = 299 + SYS_FSTATAT64 = 300 + SYS_UNLINKAT = 301 + SYS_RENAMEAT = 302 + SYS_LINKAT = 303 + SYS_SYMLINKAT = 304 + SYS_READLINKAT = 305 + SYS_FCHMODAT = 306 + SYS_FACCESSAT = 307 + SYS_PSELECT6 = 308 + SYS_PPOLL = 309 + SYS_UNSHARE = 310 + SYS_SET_ROBUST_LIST = 311 + SYS_GET_ROBUST_LIST = 312 + SYS_SPLICE = 313 + SYS_SYNC_FILE_RANGE = 314 + SYS_TEE = 315 + SYS_VMSPLICE = 316 + SYS_MOVE_PAGES = 317 + 
SYS_GETCPU = 318 + SYS_EPOLL_PWAIT = 319 + SYS_UTIMENSAT = 320 + SYS_SIGNALFD = 321 + SYS_TIMERFD_CREATE = 322 + SYS_EVENTFD = 323 + SYS_FALLOCATE = 324 + SYS_TIMERFD_SETTIME = 325 + SYS_TIMERFD_GETTIME = 326 + SYS_SIGNALFD4 = 327 + SYS_EVENTFD2 = 328 + SYS_EPOLL_CREATE1 = 329 + SYS_DUP3 = 330 + SYS_PIPE2 = 331 + SYS_INOTIFY_INIT1 = 332 + SYS_PREADV = 333 + SYS_PWRITEV = 334 + SYS_RT_TGSIGQUEUEINFO = 335 + SYS_PERF_EVENT_OPEN = 336 + SYS_RECVMMSG = 337 + SYS_FANOTIFY_INIT = 338 + SYS_FANOTIFY_MARK = 339 + SYS_PRLIMIT64 = 340 + SYS_NAME_TO_HANDLE_AT = 341 + SYS_OPEN_BY_HANDLE_AT = 342 + SYS_CLOCK_ADJTIME = 343 + SYS_SYNCFS = 344 + SYS_SENDMMSG = 345 + SYS_SETNS = 346 + SYS_PROCESS_VM_READV = 347 + SYS_PROCESS_VM_WRITEV = 348 + SYS_KCMP = 349 + SYS_FINIT_MODULE = 350 + SYS_SCHED_SETATTR = 351 + SYS_SCHED_GETATTR = 352 + SYS_RENAMEAT2 = 353 + SYS_SECCOMP = 354 + SYS_GETRANDOM = 355 + SYS_MEMFD_CREATE = 356 + SYS_BPF = 357 + SYS_EXECVEAT = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_USERFAULTFD = 374 + SYS_MEMBARRIER = 375 + SYS_MLOCK2 = 376 + SYS_COPY_FILE_RANGE = 377 + SYS_PREADV2 = 378 + SYS_PWRITEV2 = 379 + SYS_PKEY_MPROTECT = 380 + SYS_PKEY_ALLOC = 381 + SYS_PKEY_FREE = 382 + SYS_STATX = 383 + SYS_ARCH_PRCTL = 384 + SYS_IO_PGETEVENTS = 385 + SYS_RSEQ = 386 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index b3d8ad79d42..4917b8ab6d4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,4 +341,14 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index e092822fbad..f85fcb4f80b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ 
b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -6,359 +6,391 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_PTRACE = 26 - SYS_PAUSE = 29 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_VHANGUP = 111 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 
209 - SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_GETDENTS64 = 217 - SYS_PIVOT_ROOT = 218 - SYS_MINCORE = 219 - SYS_MADVISE = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_LOOKUP_DCOOKIE = 249 - SYS_EPOLL_CREATE = 250 - SYS_EPOLL_CTL = 251 - SYS_EPOLL_WAIT = 252 - SYS_REMAP_FILE_PAGES = 253 - SYS_SET_TID_ADDRESS = 256 - SYS_TIMER_CREATE = 257 - SYS_TIMER_SETTIME = 258 - SYS_TIMER_GETTIME = 259 - SYS_TIMER_GETOVERRUN = 260 - SYS_TIMER_DELETE = 261 - SYS_CLOCK_SETTIME = 262 - SYS_CLOCK_GETTIME = 263 - SYS_CLOCK_GETRES = 264 - SYS_CLOCK_NANOSLEEP = 265 - SYS_STATFS64 = 266 - SYS_FSTATFS64 = 267 - SYS_TGKILL = 268 - SYS_UTIMES = 269 - SYS_ARM_FADVISE64_64 = 270 - SYS_PCICONFIG_IOBASE = 271 - SYS_PCICONFIG_READ = 272 - SYS_PCICONFIG_WRITE = 273 - SYS_MQ_OPEN = 274 - SYS_MQ_UNLINK = 275 - SYS_MQ_TIMEDSEND = 276 - SYS_MQ_TIMEDRECEIVE = 277 - SYS_MQ_NOTIFY = 278 - SYS_MQ_GETSETATTR = 279 - SYS_WAITID = 280 - SYS_SOCKET = 281 - SYS_BIND = 282 - SYS_CONNECT = 283 - SYS_LISTEN = 284 - SYS_ACCEPT = 285 - SYS_GETSOCKNAME = 286 - SYS_GETPEERNAME = 287 - SYS_SOCKETPAIR = 288 - SYS_SEND = 289 - SYS_SENDTO = 290 - SYS_RECV = 291 - SYS_RECVFROM = 292 - SYS_SHUTDOWN = 293 - SYS_SETSOCKOPT = 294 - SYS_GETSOCKOPT = 295 - SYS_SENDMSG = 296 - SYS_RECVMSG = 297 - SYS_SEMOP = 298 - SYS_SEMGET = 299 - SYS_SEMCTL = 300 - SYS_MSGSND = 301 - SYS_MSGRCV = 302 - SYS_MSGGET = 303 - SYS_MSGCTL = 304 - SYS_SHMAT = 305 - SYS_SHMDT = 306 - SYS_SHMGET = 307 - SYS_SHMCTL = 308 - SYS_ADD_KEY = 309 - SYS_REQUEST_KEY = 310 - SYS_KEYCTL = 311 - SYS_SEMTIMEDOP = 312 - SYS_VSERVER = 313 - SYS_IOPRIO_SET = 314 - SYS_IOPRIO_GET = 315 - SYS_INOTIFY_INIT = 316 - SYS_INOTIFY_ADD_WATCH = 317 - SYS_INOTIFY_RM_WATCH = 318 - SYS_MBIND = 319 - SYS_GET_MEMPOLICY = 320 - SYS_SET_MEMPOLICY = 321 - SYS_OPENAT = 322 - SYS_MKDIRAT = 323 - SYS_MKNODAT = 324 - SYS_FCHOWNAT = 325 - SYS_FUTIMESAT = 326 - SYS_FSTATAT64 = 327 - SYS_UNLINKAT = 328 - SYS_RENAMEAT = 329 - SYS_LINKAT = 330 - SYS_SYMLINKAT = 331 - SYS_READLINKAT = 332 - SYS_FCHMODAT = 333 - SYS_FACCESSAT = 334 - SYS_PSELECT6 = 335 - SYS_PPOLL = 336 - SYS_UNSHARE = 337 - SYS_SET_ROBUST_LIST = 338 - SYS_GET_ROBUST_LIST = 339 - SYS_SPLICE = 340 - SYS_ARM_SYNC_FILE_RANGE = 341 - SYS_TEE = 342 - SYS_VMSPLICE = 343 - SYS_MOVE_PAGES = 344 - SYS_GETCPU = 345 - SYS_EPOLL_PWAIT = 346 - SYS_KEXEC_LOAD = 347 - SYS_UTIMENSAT = 348 - SYS_SIGNALFD = 349 - SYS_TIMERFD_CREATE = 350 - SYS_EVENTFD = 351 - SYS_FALLOCATE = 352 - SYS_TIMERFD_SETTIME = 353 - SYS_TIMERFD_GETTIME = 354 - SYS_SIGNALFD4 = 355 - SYS_EVENTFD2 = 356 - SYS_EPOLL_CREATE1 = 357 - SYS_DUP3 = 358 - SYS_PIPE2 = 359 - SYS_INOTIFY_INIT1 = 360 - SYS_PREADV = 361 - SYS_PWRITEV = 362 - SYS_RT_TGSIGQUEUEINFO = 363 - SYS_PERF_EVENT_OPEN = 364 - SYS_RECVMMSG = 365 - SYS_ACCEPT4 = 366 - SYS_FANOTIFY_INIT = 367 - SYS_FANOTIFY_MARK = 368 - SYS_PRLIMIT64 = 369 - SYS_NAME_TO_HANDLE_AT = 370 - 
SYS_OPEN_BY_HANDLE_AT = 371 - SYS_CLOCK_ADJTIME = 372 - SYS_SYNCFS = 373 - SYS_SENDMMSG = 374 - SYS_SETNS = 375 - SYS_PROCESS_VM_READV = 376 - SYS_PROCESS_VM_WRITEV = 377 - SYS_KCMP = 378 - SYS_FINIT_MODULE = 379 - SYS_SCHED_SETATTR = 380 - SYS_SCHED_GETATTR = 381 - SYS_RENAMEAT2 = 382 - SYS_SECCOMP = 383 - SYS_GETRANDOM = 384 - SYS_MEMFD_CREATE = 385 - SYS_BPF = 386 - SYS_EXECVEAT = 387 - SYS_USERFAULTFD = 388 - SYS_MEMBARRIER = 389 - SYS_MLOCK2 = 390 - SYS_COPY_FILE_RANGE = 391 - SYS_PREADV2 = 392 - SYS_PWRITEV2 = 393 - SYS_PKEY_MPROTECT = 394 - SYS_PKEY_ALLOC = 395 - SYS_PKEY_FREE = 396 - SYS_STATX = 397 - SYS_RSEQ = 398 - SYS_IO_PGETEVENTS = 399 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_PTRACE = 26 + SYS_PAUSE = 29 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_VHANGUP = 111 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + 
SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_GETDENTS64 = 217 + SYS_PIVOT_ROOT = 218 + SYS_MINCORE = 219 + SYS_MADVISE = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_LOOKUP_DCOOKIE = 249 + SYS_EPOLL_CREATE = 250 + SYS_EPOLL_CTL = 251 + SYS_EPOLL_WAIT = 252 + SYS_REMAP_FILE_PAGES = 253 + SYS_SET_TID_ADDRESS = 256 + SYS_TIMER_CREATE = 257 + SYS_TIMER_SETTIME = 258 + SYS_TIMER_GETTIME = 259 + SYS_TIMER_GETOVERRUN = 260 + SYS_TIMER_DELETE = 261 + SYS_CLOCK_SETTIME = 262 + SYS_CLOCK_GETTIME = 263 + SYS_CLOCK_GETRES = 264 + SYS_CLOCK_NANOSLEEP = 265 + SYS_STATFS64 = 266 + SYS_FSTATFS64 = 267 + SYS_TGKILL = 268 + SYS_UTIMES = 269 + SYS_ARM_FADVISE64_64 = 270 + SYS_PCICONFIG_IOBASE = 271 + SYS_PCICONFIG_READ = 272 + SYS_PCICONFIG_WRITE = 273 + SYS_MQ_OPEN = 274 + SYS_MQ_UNLINK = 275 + SYS_MQ_TIMEDSEND = 276 + SYS_MQ_TIMEDRECEIVE = 277 + SYS_MQ_NOTIFY = 278 + SYS_MQ_GETSETATTR = 279 + SYS_WAITID = 280 + SYS_SOCKET = 281 + SYS_BIND = 282 + SYS_CONNECT = 283 + SYS_LISTEN = 284 + SYS_ACCEPT = 285 + SYS_GETSOCKNAME = 286 + SYS_GETPEERNAME = 287 + SYS_SOCKETPAIR = 288 + SYS_SEND = 289 + SYS_SENDTO = 290 + SYS_RECV = 291 + SYS_RECVFROM = 292 + SYS_SHUTDOWN = 293 + SYS_SETSOCKOPT = 294 + SYS_GETSOCKOPT = 295 + SYS_SENDMSG = 296 + SYS_RECVMSG = 297 + SYS_SEMOP = 298 + SYS_SEMGET = 299 + SYS_SEMCTL = 300 + SYS_MSGSND = 301 + SYS_MSGRCV = 302 + SYS_MSGGET = 303 + SYS_MSGCTL = 304 + SYS_SHMAT = 305 + SYS_SHMDT = 306 + SYS_SHMGET = 307 + SYS_SHMCTL = 308 + SYS_ADD_KEY = 309 + SYS_REQUEST_KEY = 310 + SYS_KEYCTL = 311 + SYS_SEMTIMEDOP = 312 + SYS_VSERVER = 313 + SYS_IOPRIO_SET = 314 + SYS_IOPRIO_GET = 315 + SYS_INOTIFY_INIT = 316 + SYS_INOTIFY_ADD_WATCH = 317 + SYS_INOTIFY_RM_WATCH = 318 + SYS_MBIND = 319 + SYS_GET_MEMPOLICY = 320 + SYS_SET_MEMPOLICY = 321 + SYS_OPENAT = 322 + SYS_MKDIRAT = 323 + SYS_MKNODAT = 324 + SYS_FCHOWNAT = 325 + SYS_FUTIMESAT = 326 + SYS_FSTATAT64 = 327 + SYS_UNLINKAT = 328 + SYS_RENAMEAT = 329 + SYS_LINKAT = 330 + SYS_SYMLINKAT = 331 + SYS_READLINKAT = 332 + SYS_FCHMODAT = 333 + SYS_FACCESSAT = 334 + SYS_PSELECT6 = 335 + SYS_PPOLL = 336 + SYS_UNSHARE = 337 + SYS_SET_ROBUST_LIST = 338 + SYS_GET_ROBUST_LIST = 339 + SYS_SPLICE = 340 + SYS_ARM_SYNC_FILE_RANGE = 341 + SYS_TEE = 342 + SYS_VMSPLICE = 343 + SYS_MOVE_PAGES = 344 + SYS_GETCPU = 345 + SYS_EPOLL_PWAIT = 346 + SYS_KEXEC_LOAD = 347 + 
SYS_UTIMENSAT = 348 + SYS_SIGNALFD = 349 + SYS_TIMERFD_CREATE = 350 + SYS_EVENTFD = 351 + SYS_FALLOCATE = 352 + SYS_TIMERFD_SETTIME = 353 + SYS_TIMERFD_GETTIME = 354 + SYS_SIGNALFD4 = 355 + SYS_EVENTFD2 = 356 + SYS_EPOLL_CREATE1 = 357 + SYS_DUP3 = 358 + SYS_PIPE2 = 359 + SYS_INOTIFY_INIT1 = 360 + SYS_PREADV = 361 + SYS_PWRITEV = 362 + SYS_RT_TGSIGQUEUEINFO = 363 + SYS_PERF_EVENT_OPEN = 364 + SYS_RECVMMSG = 365 + SYS_ACCEPT4 = 366 + SYS_FANOTIFY_INIT = 367 + SYS_FANOTIFY_MARK = 368 + SYS_PRLIMIT64 = 369 + SYS_NAME_TO_HANDLE_AT = 370 + SYS_OPEN_BY_HANDLE_AT = 371 + SYS_CLOCK_ADJTIME = 372 + SYS_SYNCFS = 373 + SYS_SENDMMSG = 374 + SYS_SETNS = 375 + SYS_PROCESS_VM_READV = 376 + SYS_PROCESS_VM_WRITEV = 377 + SYS_KCMP = 378 + SYS_FINIT_MODULE = 379 + SYS_SCHED_SETATTR = 380 + SYS_SCHED_GETATTR = 381 + SYS_RENAMEAT2 = 382 + SYS_SECCOMP = 383 + SYS_GETRANDOM = 384 + SYS_MEMFD_CREATE = 385 + SYS_BPF = 386 + SYS_EXECVEAT = 387 + SYS_USERFAULTFD = 388 + SYS_MEMBARRIER = 389 + SYS_MLOCK2 = 390 + SYS_COPY_FILE_RANGE = 391 + SYS_PREADV2 = 392 + SYS_PWRITEV2 = 393 + SYS_PKEY_MPROTECT = 394 + SYS_PKEY_ALLOC = 395 + SYS_PKEY_FREE = 396 + SYS_STATX = 397 + SYS_RSEQ = 398 + SYS_IO_PGETEVENTS = 399 + SYS_MIGRATE_PAGES = 400 + SYS_KEXEC_FILE_LOAD = 401 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 3206967896a..678a119bc9e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -285,4 +285,15 @@ const ( SYS_STATX = 291 SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 6893a5bd055..222c9f9a2f9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -6,372 +6,412 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 - SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - 
SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 - SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP = 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT = 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 - SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 - SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - 
SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD = 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - 
SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 
4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + 
SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 + SYS_SEMGET = 4393 + SYS_SEMCTL = 4394 + SYS_SHMGET = 4395 + SYS_SHMCTL = 4396 + SYS_SHMAT = 4397 + SYS_SHMDT = 4398 + SYS_MSGGET = 4399 + SYS_MSGSND = 4400 + SYS_MSGRCV = 4401 + SYS_MSGCTL = 4402 + SYS_CLOCK_GETTIME64 = 4403 + SYS_CLOCK_SETTIME64 = 4404 + SYS_CLOCK_ADJTIME64 = 4405 + SYS_CLOCK_GETRES_TIME64 = 4406 + SYS_CLOCK_NANOSLEEP_TIME64 = 4407 + SYS_TIMER_GETTIME64 = 4408 + SYS_TIMER_SETTIME64 = 4409 + SYS_TIMERFD_GETTIME64 = 4410 + SYS_TIMERFD_SETTIME64 = 4411 + SYS_UTIMENSAT_TIME64 = 4412 + SYS_PSELECT6_TIME64 = 4413 + SYS_PPOLL_TIME64 = 4414 + SYS_IO_PGETEVENTS_TIME64 = 4416 + SYS_RECVMMSG_TIME64 = 4417 + SYS_MQ_TIMEDSEND_TIME64 = 4418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 + SYS_SEMTIMEDOP_TIME64 = 4420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 + SYS_FUTEX_TIME64 = 4422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 + SYS_PIDFD_SEND_SIGNAL = 4424 + SYS_IO_URING_SETUP = 4425 + SYS_IO_URING_ENTER = 4426 + SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 40164cacdf5..28e6d0e9d66 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -334,4 +334,14 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 
SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 8a909738bc0..e643c6f6322 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -334,4 +334,14 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 8d781842245..01d93c420f4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -6,372 +6,412 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 - SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 - SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP = 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT 
= 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 - SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 - SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD = 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 
4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 
4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + 
SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + 
SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 + SYS_SEMGET = 4393 + SYS_SEMCTL = 4394 + SYS_SHMGET = 4395 + SYS_SHMCTL = 4396 + SYS_SHMAT = 4397 + SYS_SHMDT = 4398 + SYS_MSGGET = 4399 + SYS_MSGSND = 4400 + SYS_MSGRCV = 4401 + SYS_MSGCTL = 4402 + SYS_CLOCK_GETTIME64 = 4403 + SYS_CLOCK_SETTIME64 = 4404 + SYS_CLOCK_ADJTIME64 = 4405 + SYS_CLOCK_GETRES_TIME64 = 4406 + SYS_CLOCK_NANOSLEEP_TIME64 = 4407 + SYS_TIMER_GETTIME64 = 4408 + SYS_TIMER_SETTIME64 = 4409 + SYS_TIMERFD_GETTIME64 = 4410 + SYS_TIMERFD_SETTIME64 = 4411 + SYS_UTIMENSAT_TIME64 = 4412 + SYS_PSELECT6_TIME64 = 4413 + SYS_PPOLL_TIME64 = 4414 + SYS_IO_PGETEVENTS_TIME64 = 4416 + SYS_RECVMMSG_TIME64 = 4417 + SYS_MQ_TIMEDSEND_TIME64 = 4418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 + SYS_SEMTIMEDOP_TIME64 = 4420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 + SYS_FUTEX_TIME64 = 4422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 + SYS_PIDFD_SEND_SIGNAL = 4424 + SYS_IO_URING_SETUP = 4425 + SYS_IO_URING_ENTER = 4426 + SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index ec5bde3d563..5744149ebf9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -372,4 +372,25 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index bdbabdbcdb1..21c83204281 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -372,4 +372,25 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 473c74613f6..c1bb6d8f2d8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -284,4 +284,15 @@ const ( SYS_STATX = 291 SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + 
SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 6eb7c257f8c..bc3cc6b5b24 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -334,4 +334,28 @@ const ( SYS_KEXEC_FILE_LOAD = 381 SYS_IO_PGETEVENTS = 382 SYS_RSEQ = 383 + SYS_PKEY_MPROTECT = 384 + SYS_PKEY_ALLOC = 385 + SYS_PKEY_FREE = 386 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 93480fcb168..0a2841ba8ce 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -253,6 +253,7 @@ const ( SYS_TIMER_GETOVERRUN = 264 SYS_TIMER_DELETE = 265 SYS_TIMER_CREATE = 266 + SYS_VSERVER = 267 SYS_IO_SETUP = 268 SYS_IO_DESTROY = 269 SYS_IO_SUBMIT = 270 @@ -347,4 +348,29 @@ const ( SYS_PWRITEV2 = 359 SYS_STATX = 360 SYS_IO_PGETEVENTS = 361 + SYS_PKEY_MPROTECT = 362 + SYS_PKEY_ALLOC = 363 + SYS_PKEY_FREE = 364 + SYS_RSEQ = 365 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go new file mode 100644 index 00000000000..fe2b689b637 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -0,0 +1,217 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... 
mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... 
void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index cedc9b0f26d..2c1f815e6f9 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -30,11 +30,6 @@ type Timespec struct { Nsec int32 } -type StTimespec struct { - Sec int32 - Nsec int32 -} - type Timeval struct { Sec int32 Usec int32 @@ -101,9 +96,9 @@ type Stat_t struct { Gid uint32 Rdev uint32 Size int32 - Atim StTimespec - Mtim StTimespec - Ctim StTimespec + Atim Timespec + Mtim Timespec + Ctim Timespec Blksize int32 Blocks int32 Vfstype int32 @@ -148,6 +143,17 @@ type RawSockaddrUnix struct { Path [1023]uint8 } +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [120]uint8 +} + type RawSockaddr struct { Len uint8 Family uint8 @@ -207,17 +213,18 @@ type Msghdr struct { } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofSockaddrDatalink = 0x80 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index f46482d272e..b4a069ecbdf 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -30,12 +30,6 @@ type Timespec struct { Nsec int64 } -type StTimespec struct { - Sec int64 - Nsec int32 - _ [4]byte -} - type Timeval struct { Sec int64 Usec int32 @@ -103,10 +97,9 @@ type Stat_t struct { Gid uint32 Rdev uint64 Ssize int32 - _ [4]byte - Atim StTimespec - Mtim StTimespec - Ctim StTimespec + Atim Timespec + Mtim Timespec + Ctim Timespec Blksize int64 Blocks int64 Vfstype int32 @@ -154,6 +147,17 @@ type RawSockaddrUnix struct { Path [1023]uint8 } +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [120]uint8 +} + type RawSockaddr struct { Len uint8 Family uint8 @@ -205,27 +209,26 @@ type Linger struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - 
SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofSockaddrDatalink = 0x80 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( @@ -339,7 +342,6 @@ type Statfs_t struct { Ffree uint64 Fsid Fsid64_t Vfstype int32 - _ [4]byte Fsize uint64 Vfsnumber int32 Vfsoff int32 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 2aeb52a886d..9f47b87c507 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -59,24 +59,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { @@ -487,3 +487,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 0d0d9f2ccb7..966798a8709 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { @@ -497,3 +497,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 04e344b78d8..4fe4c9cd73e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -60,24 +60,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec 
Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { @@ -488,3 +488,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 9fec185c180..21999e4b0a2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { @@ -497,3 +497,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 7b34e2e2c68..c206f2b0534 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -57,25 +57,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Ino uint64 - Nlink uint32 - Dev uint32 - Mode uint16 - Padding1 uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare1 int64 - Qspare2 int64 + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + _1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 } type Statfs_t struct { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index c146c1ad354..7312e95ff42 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -62,50 +62,50 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim_ext int32 - Atim Timespec - Mtim_ext int32 - Mtim Timespec - Ctim_ext int32 - Ctim Timespec - Btim_ext int32 - Birthtim Timespec - 
Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + _ int32 + Atim Timespec + _ int32 + Mtim Timespec + _ int32 + Ctim Timespec + _ int32 + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec - _ [8]byte + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec + _ [8]byte } type Statfs_t struct { @@ -324,11 +324,108 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 +) + +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 ) +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + X_reason [32]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Fs uint32 + Es uint32 + Ds uint32 + Edi uint32 + Esi uint32 + Ebp uint32 + Isp uint32 + Ebx uint32 + Edx uint32 + Ecx uint32 + Eax uint32 + Trapno uint32 + Err uint32 + Eip uint32 + Cs uint32 + Eflags uint32 + Esp uint32 + Ss uint32 + Gs uint32 +} + +type FpReg struct { + Env [7]uint32 + Acc [8][10]uint8 + Ex_sw uint32 + Pad [64]uint8 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint32 Filter int16 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index ac33a8dd4a6..29ba2f5bf74 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags 
uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { @@ -322,11 +322,115 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 ) +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + R15 int64 + R14 int64 + R13 int64 + R12 int64 + R11 int64 + R10 int64 + R9 int64 + R8 int64 + Rdi int64 + Rsi int64 + Rbp int64 + Rbx int64 + Rdx int64 + Rcx int64 + Rax int64 + Trapno uint32 + Fs uint16 + Gs uint16 + Err uint32 + Es uint16 + Ds uint16 + Rip int64 + Cs int64 + Rflags int64 + Rsp int64 + Ss int64 +} + +type FpReg struct { + Env [4]uint64 + Acc [8][16]uint8 + Xacc [16][16]uint8 + Spare [12]uint64 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint64 Filter int16 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index e27511a642f..b4090ef3115 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -64,45 +64,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec + Dev 
uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { @@ -322,11 +322,92 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 +) + +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 ) +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + X_reason [32]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + R [13]uint32 + R_sp uint32 + R_lr uint32 + R_pc uint32 + R_cpsr uint32 +} + +type FpReg struct { + Fpr_fpsr uint32 + Fpr [8][3]uint32 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint32 Filter int16 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 2aadc1a4d8f..1542a87734a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { @@ -322,11 +322,93 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + 
PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 +) + +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 ) +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + X_reason [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + X [30]uint64 + Lr uint64 + Sp uint64 + Elr uint64 + Spsr uint32 +} + +type FpReg struct { + Fp_q [32]uint128 + Fp_sr uint32 + Fp_cr uint32 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint64 Filter int16 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index d262150cc08..50bc4128ff6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -405,6 +405,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -434,141 +439,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 
0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + 
RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -634,6 +683,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x8 @@ -759,6 +829,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -955,7 +1027,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1058,6 +1131,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1079,21 +1153,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + 
PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1106,6 +1197,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1362,6 +1454,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1410,6 +1517,9 @@ const ( SizeofTpacketHdr = 0x18 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2025,3 +2135,389 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + 
Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + 
BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index e492caacda5..055eaa76a47 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -406,6 +406,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -435,141 +440,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 
0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + 
IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -635,6 +684,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -772,6 +842,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -966,7 +1038,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark 
uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1069,6 +1142,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1090,21 +1164,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1117,6 +1208,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1374,6 +1466,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1422,6 +1529,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2038,3 +2148,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + 
CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + 
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 
+ Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index ad4342156e6..66019c9cfeb 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -409,6 +409,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -438,141 +443,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE 
= 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + 
RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -638,6 +687,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x8 @@ -748,6 +818,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -944,7 +1016,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1047,6 +1120,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1068,21 +1142,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1095,6 +1186,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1352,6 +1444,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq 
struct { Block_size uint32 Block_nr uint32 @@ -1400,6 +1507,9 @@ const ( SizeofTpacketHdr = 0x18 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2016,3 +2126,389 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + 
+const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + 
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index ef76a362d9c..3104798c40e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,141 +441,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN 
= 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 
+ IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -636,6 +685,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -751,6 +821,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -945,7 +1017,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1048,6 +1121,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1069,21 +1143,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + 
PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1096,6 +1187,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1353,6 +1445,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1401,6 +1508,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2017,3 +2127,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type 
[64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + 
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index dbf05903d42..46c86021b71 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go 
@@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,141 +442,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + 
NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 
0xc ) type NlMsghdr struct { @@ -637,6 +686,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x8 @@ -753,6 +823,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -949,7 +1021,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1052,6 +1125,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1073,21 +1147,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1100,6 +1191,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1358,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1406,6 +1513,9 @@ const ( SizeofTpacketHdr = 0x18 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2022,3 +2132,389 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + 
CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY 
= 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + 
BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 1b7e6707934..c2fe1a62a65 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,141 +441,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA 
= 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + 
RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -636,6 +685,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -753,6 +823,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -947,7 +1019,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1050,6 +1123,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1071,21 +1145,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + 
PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1098,6 +1189,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1355,6 +1447,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1403,6 +1510,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2019,3 +2129,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type 
[64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + 
BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 57379005b58..f1eb0d39799 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,141 +441,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - 
IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + 
IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -636,6 +685,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -753,6 +823,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -947,7 +1019,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + 
_ uint16 } type PerfEventMmapPage struct { @@ -1050,6 +1123,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1071,21 +1145,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1098,6 +1189,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1355,6 +1447,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1403,6 +1510,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2019,3 +2129,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg 
struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 
0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key 
[32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 0e88bf47bcd..8759bc36b82 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,141 +442,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 
0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + 
RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -637,6 +686,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x8 @@ -753,6 +823,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -949,7 +1021,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1052,6 +1125,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1073,21 +1147,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1100,6 +1191,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1358,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1406,6 
+1513,9 @@ const ( SizeofTpacketHdr = 0x18 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2022,3 +2132,389 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 
+ BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + 
BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 5ac91b3f7aa..a8120054123 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,141 +442,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP 
= 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + 
IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -637,6 +686,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -761,6 +831,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -955,7 +1027,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1058,6 +1131,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1079,21 +1153,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1106,6 +1197,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1363,6 +1455,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1411,6 +1518,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2027,3 +2137,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 
+ Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + 
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 1e59b45068c..74b7a9199b3 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -408,6 +408,11 @@ type 
TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,141 +442,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + 
NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr 
struct { @@ -637,6 +686,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -761,6 +831,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -955,7 +1027,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1058,6 +1131,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1079,21 +1153,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1106,6 +1197,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1363,6 +1455,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1411,6 +1518,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2027,3 +2137,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + 
CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 
0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + 
BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 508885f11f0..ccea3e63873 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,141 +441,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS 
= 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 
+ RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -636,6 +685,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -738,6 +808,7 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } @@ -778,6 +849,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -972,7 +1045,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1075,6 +1149,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1096,21 +1171,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + 
PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1123,6 +1215,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1380,6 +1473,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1428,6 +1536,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2044,3 +2155,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type 
[64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + 
BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d315f2c3a7a..d8fc0bc1cda 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -406,6 +406,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -435,141 +440,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - 
IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + 
IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -635,6 +684,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -774,6 +844,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -968,7 +1040,8 @@ type 
PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1071,6 +1144,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1092,21 +1166,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1119,6 +1210,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1377,6 +1469,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1425,6 +1532,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2041,3 +2151,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + 
CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + 
BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode 
uint64 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index a1a9279c225..5e0ab932926 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -410,6 +410,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -439,141 +444,185 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_INFO_KIND = 0x1 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x33 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - 
RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x33 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST 
= 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -639,6 +688,27 @@ type RtNexthop struct { Ifindex int32 } +type NdUseroptmsg struct { + Family uint8 + Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 @@ -756,6 +826,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -950,7 +1022,8 @@ type PerfEventAttr struct { Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 - _ uint32 + Sample_max_stack uint16 + _ uint16 } type PerfEventMmapPage struct { @@ -1053,6 +1126,7 @@ const ( PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 @@ -1074,21 +1148,38 @@ const ( PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 @@ -1101,6 +1192,7 @@ const ( PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( @@ -1358,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 
+ Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -1406,6 +1513,9 @@ const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc ) const ( @@ -2022,3 +2132,390 @@ type SockExtendedErr struct { Info uint32 Data uint32 } + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type 
CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + 
BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2dae0c17a3c..86736ab6e7f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -57,23 +57,23 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 + Dev uint64 + Mode uint32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 } type Statfs_t [0]byte @@ -411,6 +411,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 1f0e76c0ccc..3427811f989 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec 
Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte @@ -418,6 +418,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 53f2159c7d2..399f37a4341 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -59,26 +59,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte @@ -416,6 +416,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 43da2c41c50..32f0c15d98e 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte @@ -418,6 +418,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 8b37d83992b..61ea0019a29 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -436,6 +436,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) @@ -558,3 +559,13 @@ type Uvmexp struct { Fpswtch int32 Kmapent int32 } + 
+const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 6efea463559..87a493f68fd 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -436,6 +436,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) @@ -558,3 +559,13 @@ type Uvmexp struct { Fpswtch int32 Kmapent int32 } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 510efc3eaac..d80836efaba 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -437,6 +437,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) @@ -559,3 +560,13 @@ type Uvmexp struct { Fpswtch int32 Kmapent int32 } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go new file mode 100644 index 00000000000..4e158746f11 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -0,0 +1,565 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build arm64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags 
uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget 
int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/dll_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/dll_windows.go index e92c05b213c..ba67658db1d 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/dll_windows.go @@ -359,11 +359,11 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { // trying to load "foo.dll" out of the system // folder, but LoadLibraryEx doesn't support // that yet on their system, so emulate it. - windir, _ := Getenv("WINDIR") // old var; apparently works on XP - if windir == "" { - return nil, errString("%WINDIR% not defined") + systemdir, err := GetSystemDirectory() + if err != nil { + return nil, err } - loadDLL = windir + "\\System32\\" + name + loadDLL = systemdir + "\\" + name } } h, err := LoadLibraryEx(loadDLL, 0, flags) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go index bdc71e241e0..f482a9fab3b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go @@ -6,7 +6,11 @@ package windows -import "syscall" +import ( + "syscall" + "unicode/utf16" + "unsafe" +) func Getenv(key string) (value string, found bool) { return syscall.Getenv(key) @@ -24,6 +28,34 @@ func Environ() []string { return syscall.Environ() } +// Returns a default environment associated with the token, rather than the current +// process. If inheritExisting is true, then this environment also inherits the +// environment of the current process. 
+func (token Token) Environ(inheritExisting bool) (env []string, err error) { + var block *uint16 + err = CreateEnvironmentBlock(&block, token, inheritExisting) + if err != nil { + return nil, err + } + defer DestroyEnvironmentBlock(block) + blockp := uintptr(unsafe.Pointer(block)) + for { + entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:] + for i, v := range entry { + if v == 0 { + entry = entry[:i] + break + } + } + if len(entry) == 0 { + break + } + env = append(env, string(utf16.Decode(entry))) + blockp += 2 * (uintptr(len(entry)) + 1) + } + return env, nil +} + func Unsetenv(key string) error { return syscall.Unsetenv(key) } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkerrors.bash b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkerrors.bash new file mode 100644 index 00000000000..2163843a11d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -0,0 +1,63 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" +[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } + +declare -A errors + +{ + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." + echo + echo "package windows" + echo "import \"syscall\"" + echo "const (" + + while read -r line; do + unset vtype + if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + else + continue + fi + [[ -n $key && -n $value ]] || continue + [[ -z ${errors["$key"]} ]] || continue + errors["$key"]="$value" + if [[ -v vtype ]]; then + if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then + vtype="" + elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then + vtype="Handle" + else + vtype="syscall.Errno" + fi + last_vtype="$vtype" + else + vtype="" + if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then + value="S_OK" + elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then + value="ERROR_SUCCESS" + fi + fi + + echo "$key $vtype = $value" + done < "$winerror" + + echo ")" +} | gofmt > "zerrors_windows.go" diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 00000000000..ab8924e936f --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." 
+ echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/mksyscall.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mksyscall.go index fb7db0ef8d4..62770572747 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/mksyscall.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build generate + package windows //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/mksyscall.go index 0ac95ffe731..cf843ce2b22 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build generate + package registry //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/value.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/value.go index 71d4e15bab1..7487e05f8db 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/value.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/registry/value.go @@ -68,7 +68,7 @@ func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error return int(l), valtype, nil } -func (k Key) getValue(name string, buf []byte) (date []byte, valtype uint32, err error) { +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { p, err := syscall.UTF16PtrFromString(name) if err != nil { return nil, 0, err @@ -241,12 +241,15 @@ func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error if len(data) != 4 { return 0, typ, errors.New("DWORD value is not 4 bytes long") } - return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil case QWORD: if len(data) != 8 { return 0, typ, errors.New("QWORD value is not 8 bytes long") } - return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil default: return 0, typ, ErrUnexpectedType } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/security_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/security_windows.go index 4f17a3331fd..61b49647b9a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/security_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/security_windows.go @@ -149,7 +149,7 @@ const ( DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d - DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e + DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 @@ -169,15 +169,21 @@ const ( //sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid //sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid //sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid +//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid //sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid //sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid +//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount +//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority +//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid // The security identifier (SID) structure is a variable-length // 
structure used to uniquely identify users or groups. type SID struct{} // StringToSid converts a string-format security identifier -// sid into a valid, functional sid. +// SID into a valid, functional SID. func StringToSid(s string) (*SID, error) { var sid *SID p, e := UTF16PtrFromString(s) @@ -192,7 +198,7 @@ func StringToSid(s string) (*SID, error) { return sid.Copy() } -// LookupSID retrieves a security identifier sid for the account +// LookupSID retrieves a security identifier SID for the account // and the name of the domain on which the account was found. // System specify target computer to search. func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { @@ -229,7 +235,7 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32, } } -// String converts sid to a string format +// String converts SID to a string format // suitable for display, storage, or transmission. func (sid *SID) String() (string, error) { var s *uint16 @@ -241,12 +247,12 @@ func (sid *SID) String() (string, error) { return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil } -// Len returns the length, in bytes, of a valid security identifier sid. +// Len returns the length, in bytes, of a valid security identifier SID. func (sid *SID) Len() int { return int(GetLengthSid(sid)) } -// Copy creates a duplicate of security identifier sid. +// Copy creates a duplicate of security identifier SID. func (sid *SID) Copy() (*SID, error) { b := make([]byte, sid.Len()) sid2 := (*SID)(unsafe.Pointer(&b[0])) @@ -257,8 +263,42 @@ func (sid *SID) Copy() (*SID, error) { return sid2, nil } -// LookupAccount retrieves the name of the account for this sid -// and the name of the first domain on which this sid is found. +// IdentifierAuthority returns the identifier authority of the SID. +func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { + return *getSidIdentifierAuthority(sid) +} + +// SubAuthorityCount returns the number of sub-authorities in the SID. +func (sid *SID) SubAuthorityCount() uint8 { + return *getSidSubAuthorityCount(sid) +} + +// SubAuthority returns the sub-authority of the SID as specified by +// the index, which must be less than sid.SubAuthorityCount(). +func (sid *SID) SubAuthority(idx uint32) uint32 { + if idx >= uint32(sid.SubAuthorityCount()) { + panic("sub-authority index out of range") + } + return *getSidSubAuthority(sid, idx) +} + +// IsValid returns whether the SID has a valid revision and length. +func (sid *SID) IsValid() bool { + return isValidSid(sid) +} + +// Equals compares two SIDs for equality. +func (sid *SID) Equals(sid2 *SID) bool { + return EqualSid(sid, sid2) +} + +// IsWellKnown determines whether the SID matches the well-known sidType. +func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { + return isWellKnownSid(sid, sidType) +} + +// LookupAccount retrieves the name of the account for this SID +// and the name of the first domain on which this SID is found. // System specify target computer to search for. func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { var sys *uint16 @@ -286,6 +326,158 @@ func (sid *SID) LookupAccount(system string) (account, domain string, accType ui } } +// Various types of pre-specified SIDs that can be synthesized and compared at runtime. 
+type WELL_KNOWN_SID_TYPE uint32 + +const ( + WinNullSid = 0 + WinWorldSid = 1 + WinLocalSid = 2 + WinCreatorOwnerSid = 3 + WinCreatorGroupSid = 4 + WinCreatorOwnerServerSid = 5 + WinCreatorGroupServerSid = 6 + WinNtAuthoritySid = 7 + WinDialupSid = 8 + WinNetworkSid = 9 + WinBatchSid = 10 + WinInteractiveSid = 11 + WinServiceSid = 12 + WinAnonymousSid = 13 + WinProxySid = 14 + WinEnterpriseControllersSid = 15 + WinSelfSid = 16 + WinAuthenticatedUserSid = 17 + WinRestrictedCodeSid = 18 + WinTerminalServerSid = 19 + WinRemoteLogonIdSid = 20 + WinLogonIdsSid = 21 + WinLocalSystemSid = 22 + WinLocalServiceSid = 23 + WinNetworkServiceSid = 24 + WinBuiltinDomainSid = 25 + WinBuiltinAdministratorsSid = 26 + WinBuiltinUsersSid = 27 + WinBuiltinGuestsSid = 28 + WinBuiltinPowerUsersSid = 29 + WinBuiltinAccountOperatorsSid = 30 + WinBuiltinSystemOperatorsSid = 31 + WinBuiltinPrintOperatorsSid = 32 + WinBuiltinBackupOperatorsSid = 33 + WinBuiltinReplicatorSid = 34 + WinBuiltinPreWindows2000CompatibleAccessSid = 35 + WinBuiltinRemoteDesktopUsersSid = 36 + WinBuiltinNetworkConfigurationOperatorsSid = 37 + WinAccountAdministratorSid = 38 + WinAccountGuestSid = 39 + WinAccountKrbtgtSid = 40 + WinAccountDomainAdminsSid = 41 + WinAccountDomainUsersSid = 42 + WinAccountDomainGuestsSid = 43 + WinAccountComputersSid = 44 + WinAccountControllersSid = 45 + WinAccountCertAdminsSid = 46 + WinAccountSchemaAdminsSid = 47 + WinAccountEnterpriseAdminsSid = 48 + WinAccountPolicyAdminsSid = 49 + WinAccountRasAndIasServersSid = 50 + WinNTLMAuthenticationSid = 51 + WinDigestAuthenticationSid = 52 + WinSChannelAuthenticationSid = 53 + WinThisOrganizationSid = 54 + WinOtherOrganizationSid = 55 + WinBuiltinIncomingForestTrustBuildersSid = 56 + WinBuiltinPerfMonitoringUsersSid = 57 + WinBuiltinPerfLoggingUsersSid = 58 + WinBuiltinAuthorizationAccessSid = 59 + WinBuiltinTerminalServerLicenseServersSid = 60 + WinBuiltinDCOMUsersSid = 61 + WinBuiltinIUsersSid = 62 + WinIUserSid = 63 + WinBuiltinCryptoOperatorsSid = 64 + WinUntrustedLabelSid = 65 + WinLowLabelSid = 66 + WinMediumLabelSid = 67 + WinHighLabelSid = 68 + WinSystemLabelSid = 69 + WinWriteRestrictedCodeSid = 70 + WinCreatorOwnerRightsSid = 71 + WinCacheablePrincipalsGroupSid = 72 + WinNonCacheablePrincipalsGroupSid = 73 + WinEnterpriseReadonlyControllersSid = 74 + WinAccountReadonlyControllersSid = 75 + WinBuiltinEventLogReadersGroup = 76 + WinNewEnterpriseReadonlyControllersSid = 77 + WinBuiltinCertSvcDComAccessGroup = 78 + WinMediumPlusLabelSid = 79 + WinLocalLogonSid = 80 + WinConsoleLogonSid = 81 + WinThisOrganizationCertificateSid = 82 + WinApplicationPackageAuthoritySid = 83 + WinBuiltinAnyPackageSid = 84 + WinCapabilityInternetClientSid = 85 + WinCapabilityInternetClientServerSid = 86 + WinCapabilityPrivateNetworkClientServerSid = 87 + WinCapabilityPicturesLibrarySid = 88 + WinCapabilityVideosLibrarySid = 89 + WinCapabilityMusicLibrarySid = 90 + WinCapabilityDocumentsLibrarySid = 91 + WinCapabilitySharedUserCertificatesSid = 92 + WinCapabilityEnterpriseAuthenticationSid = 93 + WinCapabilityRemovableStorageSid = 94 + WinBuiltinRDSRemoteAccessServersSid = 95 + WinBuiltinRDSEndpointServersSid = 96 + WinBuiltinRDSManagementServersSid = 97 + WinUserModeDriversSid = 98 + WinBuiltinHyperVAdminsSid = 99 + WinAccountCloneableControllersSid = 100 + WinBuiltinAccessControlAssistanceOperatorsSid = 101 + WinBuiltinRemoteManagementUsersSid = 102 + WinAuthenticationAuthorityAssertedSid = 103 + WinAuthenticationServiceAssertedSid = 104 + WinLocalAccountSid = 105 + 
WinLocalAccountAndAdministratorSid = 106 + WinAccountProtectedUsersSid = 107 + WinCapabilityAppointmentsSid = 108 + WinCapabilityContactsSid = 109 + WinAccountDefaultSystemManagedSid = 110 + WinBuiltinDefaultSystemManagedGroupSid = 111 + WinBuiltinStorageReplicaAdminsSid = 112 + WinAccountKeyAdminsSid = 113 + WinAccountEnterpriseKeyAdminsSid = 114 + WinAuthenticationKeyTrustSid = 115 + WinAuthenticationKeyPropertyMFASid = 116 + WinAuthenticationKeyPropertyAttestationSid = 117 + WinAuthenticationFreshKeyAuthSid = 118 + WinBuiltinDeviceOwnersSid = 119 +) + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the local machine. +func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { + return CreateWellKnownDomainSid(sidType, nil) +} + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the domain specified by the domainSid parameter. +func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { + n := uint32(50) + for { + b := make([]byte, n) + sid := (*SID)(unsafe.Pointer(&b[0])) + err := createWellKnownSid(sidType, domainSid, sid, &n) + if err == nil { + return sid, nil + } + if err != ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + if n <= uint32(len(b)) { + return nil, err + } + } +} + const ( // do not reorder TOKEN_ASSIGN_PRIMARY = 1 << iota @@ -349,6 +541,53 @@ const ( MaxTokenInfoClass ) +// Group attributes inside of Tokengroups.Groups[i].Attributes +const ( + SE_GROUP_MANDATORY = 0x00000001 + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 + SE_GROUP_ENABLED = 0x00000004 + SE_GROUP_OWNER = 0x00000008 + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 + SE_GROUP_INTEGRITY = 0x00000020 + SE_GROUP_INTEGRITY_ENABLED = 0x00000040 + SE_GROUP_LOGON_ID = 0xC0000000 + SE_GROUP_RESOURCE = 0x20000000 + SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED +) + +// Privilege attributes +const ( + SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 + SE_PRIVILEGE_ENABLED = 0x00000002 + SE_PRIVILEGE_REMOVED = 0x00000004 + SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 + SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS +) + +// Token types +const ( + TokenPrimary = 1 + TokenImpersonation = 2 +) + +// Impersonation levels +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUIDAndAttributes struct { + Luid LUID + Attributes uint32 +} + type SIDAndAttributes struct { Sid *SID Attributes uint32 @@ -364,14 +603,47 @@ type Tokenprimarygroup struct { type Tokengroups struct { GroupCount uint32 - Groups [1]SIDAndAttributes + Groups [1]SIDAndAttributes // Use AllGroups() for iterating. +} + +// AllGroups returns a slice that can be used to iterate over the groups in g. +func (g *Tokengroups) AllGroups() []SIDAndAttributes { + return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +type Tokenprivileges struct { + PrivilegeCount uint32 + Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. +} + +// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { + return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] +} + +type Tokenmandatorylabel struct { + Label SIDAndAttributes +} + +func (tml *Tokenmandatorylabel) Size() uint32 { + return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) } // Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges +//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups +//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation +//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW +//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW // An access token contains the security information for a logon session. // The system creates an access token when a user logs on, and every @@ -383,7 +655,9 @@ type Tokengroups struct { type Token Handle // OpenCurrentProcessToken opens the access token -// associated with current process. +// associated with current process. It is a real +// token that needs to be closed, unlike +// GetCurrentProcessToken. func OpenCurrentProcessToken() (Token, error) { p, e := GetCurrentProcess() if e != nil { @@ -397,6 +671,27 @@ func OpenCurrentProcessToken() (Token, error) { return t, nil } +// GetCurrentProcessToken returns the access token associated with +// the current process. It is a pseudo token that does not need +// to be closed. +func GetCurrentProcessToken() Token { + return Token(^uintptr(4 - 1)) +} + +// GetCurrentThreadToken return the access token associated with +// the current thread. It is a pseudo token that does not need +// to be closed. 
+func GetCurrentThreadToken() Token { + return Token(^uintptr(5 - 1)) +} + +// GetCurrentThreadEffectiveToken returns the effective access token +// associated with the current thread. It is a pseudo token that does +// not need to be closed. +func GetCurrentThreadEffectiveToken() Token { + return Token(^uintptr(6 - 1)) +} + // Close releases access to access token. func (t Token) Close() error { return CloseHandle(Handle(t)) @@ -468,6 +763,45 @@ func (t Token) GetUserProfileDirectory() (string, error) { } } +// IsElevated returns whether the current token is elevated from a UAC perspective. +func (token Token) IsElevated() bool { + var isElevated uint32 + var outLen uint32 + err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) + if err != nil { + return false + } + return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 +} + +// GetLinkedToken returns the linked token, which may be an elevated UAC token. +func (token Token) GetLinkedToken() (Token, error) { + var linkedToken Token + var outLen uint32 + err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) + if err != nil { + return Token(0), err + } + return linkedToken, nil +} + +// GetSystemDirectory retrieves path to current location of the system +// directory, which is typically, though not always, C:\Windows\System32. +func GetSystemDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + // IsMember reports whether the access token t is a member of the provided SID. 
func (t Token) IsMember(sid *SID) (bool, error) { var b int32 @@ -476,3 +810,45 @@ func (t Token) IsMember(sid *SID) (bool, error) { } return b != 0, nil } + +const ( + WTS_CONSOLE_CONNECT = 0x1 + WTS_CONSOLE_DISCONNECT = 0x2 + WTS_REMOTE_CONNECT = 0x3 + WTS_REMOTE_DISCONNECT = 0x4 + WTS_SESSION_LOGON = 0x5 + WTS_SESSION_LOGOFF = 0x6 + WTS_SESSION_LOCK = 0x7 + WTS_SESSION_UNLOCK = 0x8 + WTS_SESSION_REMOTE_CONTROL = 0x9 + WTS_SESSION_CREATE = 0xa + WTS_SESSION_TERMINATE = 0xb +) + +const ( + WTSActive = 0 + WTSConnected = 1 + WTSConnectQuery = 2 + WTSShadow = 3 + WTSDisconnected = 4 + WTSIdle = 5 + WTSListen = 6 + WTSReset = 7 + WTSDown = 8 + WTSInit = 9 +) + +type WTSSESSION_NOTIFICATION struct { + Size uint32 + SessionID uint32 +} + +type WTS_SESSION_INFO struct { + SessionID uint32 + WindowStationName *uint16 + State uint32 +} + +//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken +//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW +//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go index 62fc31b40bd..847e00bc990 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go @@ -85,23 +85,47 @@ const ( SERVICE_INACTIVE = 2 SERVICE_STATE_ALL = 3 - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 - NO_ERROR = 0 + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 + SERVICE_CONFIG_SERVICE_SID_INFO = 5 + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 + SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 + SERVICE_CONFIG_TRIGGER_INFO = 8 + SERVICE_CONFIG_PREFERRED_NODE = 9 + SERVICE_CONFIG_LAUNCH_PROTECTED = 12 + + SERVICE_SID_TYPE_NONE = 0 + SERVICE_SID_TYPE_UNRESTRICTED = 1 + SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED SC_ENUM_PROCESS_INFO = 0 + + SERVICE_NOTIFY_STATUS_CHANGE = 2 + SERVICE_NOTIFY_STOPPED = 0x00000001 + SERVICE_NOTIFY_START_PENDING = 0x00000002 + SERVICE_NOTIFY_STOP_PENDING = 0x00000004 + SERVICE_NOTIFY_RUNNING = 0x00000008 + SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 + SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 + SERVICE_NOTIFY_PAUSED = 
0x00000040 + SERVICE_NOTIFY_CREATED = 0x00000080 + SERVICE_NOTIFY_DELETED = 0x00000100 + SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 ) type SERVICE_STATUS struct { @@ -135,6 +159,10 @@ type SERVICE_DESCRIPTION struct { Description *uint16 } +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + type SERVICE_STATUS_PROCESS struct { ServiceType uint32 CurrentState uint32 @@ -153,6 +181,16 @@ type ENUM_SERVICE_STATUS_PROCESS struct { ServiceStatusProcess SERVICE_STATUS_PROCESS } +type SERVICE_NOTIFY struct { + Version uint32 + NotifyCallback uintptr + Context uintptr + NotificationStatus uint32 + ServiceStatus SERVICE_STATUS_PROCESS + NotificationTriggered uint32 + ServiceNames *uint16 +} + type SERVICE_FAILURE_ACTIONS struct { ResetPeriod uint32 RebootMsg *uint16 @@ -166,12 +204,19 @@ type SC_ACTION struct { Delay uint32 } +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW //sys DeleteService(service Handle) (err error) = advapi32.DeleteService //sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW //sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW //sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService //sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW //sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus @@ -180,4 +225,5 @@ type SC_ACTION struct { //sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W //sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W //sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go index cda26b54b39..ee3d6965a18 100644 --- 
a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go @@ -72,6 +72,7 @@ type Status struct { Accepts Accepted CheckPoint uint32 // used to report progress during a lengthy operation WaitHint uint32 // estimated time required for a pending operation, in milliseconds + ProcessId uint32 // if the service is running, the process identifier of it, and otherwise zero } // ChangeRequest is sent to the service Handler to request service status change. @@ -80,6 +81,7 @@ type ChangeRequest struct { EventType uint32 EventData uintptr CurrentStatus Status + Context uintptr } // Handler is the interface that must be implemented to build Windows service. @@ -114,19 +116,18 @@ var ( ) func init() { - k := syscall.MustLoadDLL("kernel32.dll") - cSetEvent = k.MustFindProc("SetEvent").Addr() - cWaitForSingleObject = k.MustFindProc("WaitForSingleObject").Addr() - a := syscall.MustLoadDLL("advapi32.dll") - cRegisterServiceCtrlHandlerExW = a.MustFindProc("RegisterServiceCtrlHandlerExW").Addr() + k := windows.NewLazySystemDLL("kernel32.dll") + cSetEvent = k.NewProc("SetEvent").Addr() + cWaitForSingleObject = k.NewProc("WaitForSingleObject").Addr() + a := windows.NewLazySystemDLL("advapi32.dll") + cRegisterServiceCtrlHandlerExW = a.NewProc("RegisterServiceCtrlHandlerExW").Addr() } -// The HandlerEx prototype also has a context pointer but since we don't use -// it at start-up time we don't have to pass it over either. type ctlEvent struct { cmd Cmd eventType uint32 eventData uintptr + context uintptr errno uint32 } @@ -238,13 +239,12 @@ func (s *service) run() { exitFromHandler <- exitCode{ss, errno} }() - status := Status{State: Stopped} ec := exitCode{isSvcSpecific: true, errno: 0} + outcr := ChangeRequest{ + CurrentStatus: Status{State: Stopped}, + } var outch chan ChangeRequest inch := s.c - var cmd Cmd - var evtype uint32 - var evdata uintptr loop: for { select { @@ -255,10 +255,11 @@ loop: } inch = nil outch = cmdsToHandler - cmd = r.cmd - evtype = r.eventType - evdata = r.eventData - case outch <- ChangeRequest{cmd, evtype, evdata, status}: + outcr.Cmd = r.cmd + outcr.EventType = r.eventType + outcr.EventData = r.eventData + outcr.Context = r.context + case outch <- outcr: inch = s.c outch = nil case c := <-changesFromHandler: @@ -271,7 +272,7 @@ loop: } break loop } - status = c + outcr.CurrentStatus = c case ec = <-exitFromHandler: break loop } @@ -315,8 +316,8 @@ func Run(name string, handler Handler) error { return err } - ctlHandler := func(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { - e := ctlEvent{cmd: Cmd(ctl), eventType: evtype, eventData: evdata} + ctlHandler := func(ctl, evtype, evdata, context uintptr) uintptr { + e := ctlEvent{cmd: Cmd(ctl), eventType: uint32(evtype), eventData: evdata, context: context} // We assume that this callback function is running on // the same thread as Run. Nowhere in MS documentation // I could find statement to guarantee that. So putting @@ -328,7 +329,7 @@ func Run(name string, handler Handler) error { } s.c <- e // Always return NO_ERROR (0) for now. 
- return 0 + return windows.NO_ERROR } var svcmain uintptr diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_386.s b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_386.s index 2c82a9d91d7..c8a583d7375 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_386.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_386.s @@ -22,7 +22,8 @@ TEXT ·servicemain(SB),7,$0 MOVL AX, (SP) MOVL $·servicectlhandler(SB), AX MOVL AX, 4(SP) - MOVL $0, 8(SP) + // Set context to 123456 to test issue #25660. + MOVL $123456, 8(SP) MOVL ·cRegisterServiceCtrlHandlerExW(SB), AX MOVL SP, BP CALL AX diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_amd64.s b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_amd64.s index bde25e9c485..2f7609c5b64 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_amd64.s +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/sys_amd64.s @@ -14,6 +14,8 @@ TEXT ·servicemain(SB),7,$0 MOVQ ·sName(SB), CX MOVQ $·servicectlhandler(SB), DX // BUG(pastarmovj): Figure out a way to pass in context in R8. + // Set context to 123456 to test issue #25660. + MOVQ $123456, R8 MOVQ ·cRegisterServiceCtrlHandlerExW(SB), AX CALL AX CMPQ AX, $0 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go index f72fa55f3ef..b23050924f5 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,6 +10,7 @@ import ( errorspkg "errors" "sync" "syscall" + "time" "unicode/utf16" "unsafe" ) @@ -55,6 +56,10 @@ const ( FILE_UNICODE_ON_DISK = 0x00000004 FILE_VOLUME_IS_COMPRESSED = 0x00008000 FILE_VOLUME_QUOTAS = 0x00000020 + + // Return values of SleepEx and other APC functions + STATUS_USER_APC = 0x000000C0 + WAIT_IO_COMPLETION = STATUS_USER_APC ) // StringToUTF16 is deprecated. Use UTF16FromString instead. 
@@ -134,9 +139,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) -//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) //sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] //sys CloseHandle(handle Handle) (err error) //sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] @@ -145,6 +152,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW //sys FindClose(handle Handle) (err error) //sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) //sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW //sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW //sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW @@ -164,11 +172,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW //sys GetCurrentProcess() (pseudoHandle Handle, err error) +//sys GetCurrentThread() (pseudoHandle Handle, err error) //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess 
 uint32, bInheritHandle bool, dwOptions uint32) (err error)
 //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
@@ -183,6 +194,9 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW
 //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW
 //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW
+//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock
+//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
+//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64
 //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
 //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW
 //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW
@@ -221,7 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW
 //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW
 //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW
-//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
+//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
 //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
 //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
 //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
@@ -230,6 +244,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
 //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW
 //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW
+//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error)
+//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error)
 //sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error)
 // This function returns 1 byte BOOLEAN rather than the 4 byte BOOL.
 //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW
@@ -241,6 +257,18 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys SetEvent(event Handle) (err error) = kernel32.SetEvent
 //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent
 //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent
+//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx
+//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW
+//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject
+//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject
+//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode
+//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread
+//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass
+//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass
+//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error)
+//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error)
+//sys GetProcessId(process Handle) (id uint32, err error)
+//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
 
 // Volume Management Functions
 //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
@@ -262,6 +290,12 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW
 //sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW
 //sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW
+//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
+//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString
+//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2
+//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid
+//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree
+//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion
 
 // syscall interface implementation for other packages
 
@@ -476,6 +510,10 @@ func ComputerName() (name string, err error) {
 	return string(utf16.Decode(b[0:n])), nil
 }
 
+func DurationSinceBoot() time.Duration {
+	return time.Duration(getTickCount64()) * time.Millisecond
+}
+
 func Ftruncate(fd Handle, length int64) (err error) {
 	curoffset, e := Seek(fd, 0, 1)
 	if e != nil {
@@ -559,9 +597,6 @@ func Fsync(fd Handle) (err error) {
 }
 
 func Chmod(path string, mode uint32) (err error) {
-	if mode == 0 {
-		return syscall.EINVAL
-	}
 	p, e := UTF16PtrFromString(path)
 	if e != nil {
 		return e
@@ -1088,7 +1123,7 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
 	return syscall.EWINDOWS
 }
 
-func Getpid() (pid int) { return int(getCurrentProcessId()) }
+func Getpid() (pid int) { return int(GetCurrentProcessId()) }
 
 func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) {
 	// NOTE(rsc): The Win32finddata struct is wrong for the system call:
@@ -1216,3 +1251,70 @@ func Readlink(path string, buf []byte) (n int, err error) {
 	return n, nil
 }
+
+// GUIDFromString parses a string in the form of
+// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID.
+func GUIDFromString(str string) (GUID, error) {
+	guid := GUID{}
+	str16, err := syscall.UTF16PtrFromString(str)
+	if err != nil {
+		return guid, err
+	}
+	err = clsidFromString(str16, &guid)
+	if err != nil {
+		return guid, err
+	}
+	return guid, nil
+}
+
+// GenerateGUID creates a new random GUID.
+func GenerateGUID() (GUID, error) {
+	guid := GUID{}
+	err := coCreateGuid(&guid)
+	if err != nil {
+		return guid, err
+	}
+	return guid, nil
+}
+
+// String returns the canonical string form of the GUID,
+// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}".
+func (guid GUID) String() string {
+	var str [100]uint16
+	chars := stringFromGUID2(&guid, &str[0], int32(len(str)))
+	if chars <= 1 {
+		return ""
+	}
+	return string(utf16.Decode(str[:chars-1]))
+}
+
+// KnownFolderPath returns a well-known folder path for the current user, specified by one of
+// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag.
+func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+	return Token(0).KnownFolderPath(folderID, flags)
+}
+
+// KnownFolderPath returns a well-known folder path for the user token, specified by one of
+// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag.
+func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+	var p *uint16
+	err := shGetKnownFolderPath(folderID, flags, t, &p)
+	if err != nil {
+		return "", err
+	}
+	defer CoTaskMemFree(unsafe.Pointer(p))
+	return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil
+}
+
+// RtlGetVersion returns the true version of the underlying operating system, ignoring
+// any manifesting or compatibility layers on top of the win32 layer.
+func RtlGetVersion() *OsVersionInfoEx {
+	info := &OsVersionInfoEx{}
+	info.osVersionInfoSize = uint32(unsafe.Sizeof(*info))
+	// According to documentation, this function always succeeds.
+	// The function doesn't even check the validity of the
+	// osVersionInfoSize member. Disassembling ntdll.dll indicates
+	// that the documentation is indeed correct about that.
+	_ = rtlGetVersion(info)
+	return info
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go
index 141ca81bd74..1e3947f0f60 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go
@@ -4,33 +4,10 @@ package windows
-import "syscall"
-
-const (
-	// Windows errors.
- ERROR_FILE_NOT_FOUND syscall.Errno = 2 - ERROR_PATH_NOT_FOUND syscall.Errno = 3 - ERROR_ACCESS_DENIED syscall.Errno = 5 - ERROR_NO_MORE_FILES syscall.Errno = 18 - ERROR_HANDLE_EOF syscall.Errno = 38 - ERROR_NETNAME_DELETED syscall.Errno = 64 - ERROR_FILE_EXISTS syscall.Errno = 80 - ERROR_BROKEN_PIPE syscall.Errno = 109 - ERROR_BUFFER_OVERFLOW syscall.Errno = 111 - ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 - ERROR_MOD_NOT_FOUND syscall.Errno = 126 - ERROR_PROC_NOT_FOUND syscall.Errno = 127 - ERROR_ALREADY_EXISTS syscall.Errno = 183 - ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 - ERROR_MORE_DATA syscall.Errno = 234 - ERROR_OPERATION_ABORTED syscall.Errno = 995 - ERROR_IO_PENDING syscall.Errno = 997 - ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 - ERROR_NOT_FOUND syscall.Errno = 1168 - ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 - WSAEACCES syscall.Errno = 10013 - WSAEMSGSIZE syscall.Errno = 10040 - WSAECONNRESET syscall.Errno = 10054 +import ( + "net" + "syscall" + "unsafe" ) const ( @@ -126,9 +103,19 @@ const ( OPEN_ALWAYS = 4 TRUNCATE_EXISTING = 5 - FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 - FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 - FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 + FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 + FILE_FLAG_OPEN_NO_RECALL = 0x00100000 + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_SESSION_AWARE = 0x00800000 + FILE_FLAG_POSIX_SEMANTICS = 0x01000000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 + FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 + FILE_FLAG_RANDOM_ACCESS = 0x10000000 + FILE_FLAG_NO_BUFFERING = 0x20000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_FLAG_WRITE_THROUGH = 0x80000000 HANDLE_FLAG_INHERIT = 0x00000001 STARTF_USESTDHANDLES = 0x00000100 @@ -167,22 +154,54 @@ const ( IGNORE = 0 INFINITE = 0xffffffff - WAIT_TIMEOUT = 258 WAIT_ABANDONED = 0x00000080 WAIT_OBJECT_0 = 0x00000000 WAIT_FAILED = 0xFFFFFFFF - PROCESS_TERMINATE = 1 - PROCESS_QUERY_INFORMATION = 0x00000400 - SYNCHRONIZE = 0x00100000 + // Standard access rights. + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + SYNCHRONIZE = 0x00100000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 FILE_MAP_COPY = 0x01 FILE_MAP_WRITE = 0x02 FILE_MAP_READ = 0x04 FILE_MAP_EXECUTE = 0x20 - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 // Windows reserves errors >= 1<<29 for application use. 
APPLICATION_ERROR = 1 << 29 @@ -402,12 +421,6 @@ const ( CERT_CHAIN_POLICY_EV = 8 CERT_CHAIN_POLICY_SSL_F12 = 9 - CERT_E_EXPIRED = 0x800B0101 - CERT_E_ROLE = 0x800B0103 - CERT_E_PURPOSE = 0x800B0106 - CERT_E_UNTRUSTEDROOT = 0x800B0109 - CERT_E_CN_NO_MATCH = 0x800B010F - /* AuthType values for SSLExtraCertChainPolicyPara struct */ AUTHTYPE_CLIENT = 1 AUTHTYPE_SERVER = 2 @@ -420,6 +433,26 @@ const ( SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 ) +const ( + // flags for SetErrorMode + SEM_FAILCRITICALERRORS = 0x0001 + SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 + SEM_NOGPFAULTERRORBOX = 0x0002 + SEM_NOOPENFILEERRORBOX = 0x8000 +) + +const ( + // Priority class. + ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 + BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 + HIGH_PRIORITY_CLASS = 0x00000080 + IDLE_PRIORITY_CLASS = 0x00000040 + NORMAL_PRIORITY_CLASS = 0x00000020 + PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 + PROCESS_MODE_BACKGROUND_END = 0x00200000 + REALTIME_PRIORITY_CLASS = 0x00000100 +) + var ( OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") @@ -629,6 +662,16 @@ type ProcessEntry32 struct { ExeFile [MAX_PATH]uint16 } +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + type Systemtime struct { Year uint16 Month uint16 @@ -849,10 +892,6 @@ const ( DNS_TYPE_NBSTAT = 0xff01 ) -const ( - DNS_INFO_NO_RECORDS = 0x251D -) - const ( // flags inside DNSRecord.Dw DnsSectionQuestion = 0x0000 @@ -1314,6 +1353,41 @@ const ( ComputerNameMax = 8 ) +// For MessageBox() +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 + MB_APPLMODAL = 0x00000000 + MB_SYSTEMMODAL = 0x00001000 + MB_TASKMODAL = 0x00002000 + MB_HELP = 0x00004000 + MB_NOFOCUS = 0x00008000 + MB_SETFOREGROUND = 0x00010000 + MB_DEFAULT_DESKTOP_ONLY = 0x00020000 + MB_TOPMOST = 0x00040000 + MB_RIGHT = 0x00080000 + MB_RTLREADING = 0x00100000 + MB_SERVICE_NOTIFICATION = 0x00200000 +) + const ( MOVEFILE_REPLACE_EXISTING = 0x1 MOVEFILE_COPY_ALLOWED = 0x2 @@ -1342,6 +1416,16 @@ type SocketAddress struct { SockaddrLength int32 } +// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. 
+func (addr *SocketAddress) IP() net.IP { + if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { + return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { + return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } + return nil +} + type IpAdapterUnicastAddress struct { Length uint32 Flags uint32 @@ -1467,3 +1551,118 @@ type ConsoleScreenBufferInfo struct { } const UNIX_PATH_MAX = 108 // defined in afunix.h + +const ( + // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags + JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 + JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 + JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 + JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 + JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 + JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 + JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 + JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 + JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 + JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 + JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 + JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 + JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 + JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 +) + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +type IO_COUNTERS struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { + BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION + IoInfo IO_COUNTERS + ProcessMemoryLimit uintptr + JobMemoryLimit uintptr + PeakProcessMemoryUsed uintptr + PeakJobMemoryUsed uintptr +} + +const ( + // UIRestrictionsClass + JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 + JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 + JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 + JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 + JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 + JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 + JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 + JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 +) + +type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { + UIRestrictionsClass uint32 +} + +const ( + // JobObjectInformationClass + JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicLimitInformation = 2 + JobObjectBasicUIRestrictions = 4 + JobObjectCpuRateControlInformation = 15 + JobObjectEndOfJobTimeInformation = 6 + JobObjectExtendedLimitInformation = 9 + JobObjectGroupInformation = 11 + JobObjectGroupInformationEx = 14 + JobObjectLimitViolationInformation2 = 35 + JobObjectNetRateControlInformation = 32 + JobObjectNotificationLimitInformation = 12 + JobObjectNotificationLimitInformation2 = 34 + JobObjectSecurityLimitInformation = 5 +) + +const ( + KF_FLAG_DEFAULT = 0x00000000 + KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 + KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 + KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 + KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 + KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 + KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 + 
KF_FLAG_CREATE = 0x00008000 + KF_FLAG_DONT_VERIFY = 0x00004000 + KF_FLAG_DONT_UNEXPAND = 0x00002000 + KF_FLAG_NO_ALIAS = 0x00001000 + KF_FLAG_INIT = 0x00000800 + KF_FLAG_DEFAULT_PATH = 0x00000400 + KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 + KF_FLAG_SIMPLE_IDLIST = 0x00000100 + KF_FLAG_ALIAS_ONLY = 0x80000000 +) + +type OsVersionInfoEx struct { + osVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformId uint32 + CsdVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + _ byte +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zerrors_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zerrors_windows.go new file mode 100644 index 00000000000..f0212003520 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -0,0 +1,6853 @@ +// Code generated by 'mkerrors.bash'; DO NOT EDIT. + +package windows + +import "syscall" + +const ( + FACILITY_NULL = 0 + FACILITY_RPC = 1 + FACILITY_DISPATCH = 2 + FACILITY_STORAGE = 3 + FACILITY_ITF = 4 + FACILITY_WIN32 = 7 + FACILITY_WINDOWS = 8 + FACILITY_SSPI = 9 + FACILITY_SECURITY = 9 + FACILITY_CONTROL = 10 + FACILITY_CERT = 11 + FACILITY_INTERNET = 12 + FACILITY_MEDIASERVER = 13 + FACILITY_MSMQ = 14 + FACILITY_SETUPAPI = 15 + FACILITY_SCARD = 16 + FACILITY_COMPLUS = 17 + FACILITY_AAF = 18 + FACILITY_URT = 19 + FACILITY_ACS = 20 + FACILITY_DPLAY = 21 + FACILITY_UMI = 22 + FACILITY_SXS = 23 + FACILITY_WINDOWS_CE = 24 + FACILITY_HTTP = 25 + FACILITY_USERMODE_COMMONLOG = 26 + FACILITY_WER = 27 + FACILITY_USERMODE_FILTER_MANAGER = 31 + FACILITY_BACKGROUNDCOPY = 32 + FACILITY_CONFIGURATION = 33 + FACILITY_WIA = 33 + FACILITY_STATE_MANAGEMENT = 34 + FACILITY_METADIRECTORY = 35 + FACILITY_WINDOWSUPDATE = 36 + FACILITY_DIRECTORYSERVICE = 37 + FACILITY_GRAPHICS = 38 + FACILITY_SHELL = 39 + FACILITY_NAP = 39 + FACILITY_TPM_SERVICES = 40 + FACILITY_TPM_SOFTWARE = 41 + FACILITY_UI = 42 + FACILITY_XAML = 43 + FACILITY_ACTION_QUEUE = 44 + FACILITY_PLA = 48 + FACILITY_WINDOWS_SETUP = 48 + FACILITY_FVE = 49 + FACILITY_FWP = 50 + FACILITY_WINRM = 51 + FACILITY_NDIS = 52 + FACILITY_USERMODE_HYPERVISOR = 53 + FACILITY_CMI = 54 + FACILITY_USERMODE_VIRTUALIZATION = 55 + FACILITY_USERMODE_VOLMGR = 56 + FACILITY_BCD = 57 + FACILITY_USERMODE_VHD = 58 + FACILITY_USERMODE_HNS = 59 + FACILITY_SDIAG = 60 + FACILITY_WEBSERVICES = 61 + FACILITY_WINPE = 61 + FACILITY_WPN = 62 + FACILITY_WINDOWS_STORE = 63 + FACILITY_INPUT = 64 + FACILITY_EAP = 66 + FACILITY_WINDOWS_DEFENDER = 80 + FACILITY_OPC = 81 + FACILITY_XPS = 82 + FACILITY_MBN = 84 + FACILITY_POWERSHELL = 84 + FACILITY_RAS = 83 + FACILITY_P2P_INT = 98 + FACILITY_P2P = 99 + FACILITY_DAF = 100 + FACILITY_BLUETOOTH_ATT = 101 + FACILITY_AUDIO = 102 + FACILITY_STATEREPOSITORY = 103 + FACILITY_VISUALCPP = 109 + FACILITY_SCRIPT = 112 + FACILITY_PARSE = 113 + FACILITY_BLB = 120 + FACILITY_BLB_CLI = 121 + FACILITY_WSBAPP = 122 + FACILITY_BLBUI = 128 + FACILITY_USN = 129 + FACILITY_USERMODE_VOLSNAP = 130 + FACILITY_TIERING = 131 + FACILITY_WSB_ONLINE = 133 + FACILITY_ONLINE_ID = 134 + FACILITY_DEVICE_UPDATE_AGENT = 135 + FACILITY_DRVSERVICING = 136 + FACILITY_DLS = 153 + FACILITY_DELIVERY_OPTIMIZATION = 208 + FACILITY_USERMODE_SPACES = 231 + FACILITY_USER_MODE_SECURITY_CORE = 232 + FACILITY_USERMODE_LICENSING = 234 + FACILITY_SOS = 160 + FACILITY_DEBUGGERS = 176 + FACILITY_SPP = 256 + FACILITY_RESTORE = 256 + FACILITY_DMSERVER = 256 + FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 + 
FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258 + FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 + FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 + FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 + FACILITY_DEPLOYMENT_SERVICES_PXE = 263 + FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 + FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 + FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 + FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 + FACILITY_LINGUISTIC_SERVICES = 305 + FACILITY_AUDIOSTREAMING = 1094 + FACILITY_ACCELERATOR = 1536 + FACILITY_WMAAECMA = 1996 + FACILITY_DIRECTMUSIC = 2168 + FACILITY_DIRECT3D10 = 2169 + FACILITY_DXGI = 2170 + FACILITY_DXGI_DDI = 2171 + FACILITY_DIRECT3D11 = 2172 + FACILITY_DIRECT3D11_DEBUG = 2173 + FACILITY_DIRECT3D12 = 2174 + FACILITY_DIRECT3D12_DEBUG = 2175 + FACILITY_LEAP = 2184 + FACILITY_AUDCLNT = 2185 + FACILITY_WINCODEC_DWRITE_DWM = 2200 + FACILITY_WINML = 2192 + FACILITY_DIRECT2D = 2201 + FACILITY_DEFRAG = 2304 + FACILITY_USERMODE_SDBUS = 2305 + FACILITY_JSCRIPT = 2306 + FACILITY_PIDGENX = 2561 + FACILITY_EAS = 85 + FACILITY_WEB = 885 + FACILITY_WEB_SOCKET = 886 + FACILITY_MOBILE = 1793 + FACILITY_SQLITE = 1967 + FACILITY_UTC = 1989 + FACILITY_WEP = 2049 + FACILITY_SYNCENGINE = 2050 + FACILITY_XBOX = 2339 + FACILITY_PIX = 2748 + ERROR_SUCCESS syscall.Errno = 0 + NO_ERROR = 0 + SEC_E_OK Handle = 0x00000000 + ERROR_INVALID_FUNCTION syscall.Errno = 1 + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_ARENA_TRASHED syscall.Errno = 7 + ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 + ERROR_INVALID_BLOCK syscall.Errno = 9 + ERROR_BAD_ENVIRONMENT syscall.Errno = 10 + ERROR_BAD_FORMAT syscall.Errno = 11 + ERROR_INVALID_ACCESS syscall.Errno = 12 + ERROR_INVALID_DATA syscall.Errno = 13 + ERROR_OUTOFMEMORY syscall.Errno = 14 + ERROR_INVALID_DRIVE syscall.Errno = 15 + ERROR_CURRENT_DIRECTORY syscall.Errno = 16 + ERROR_NOT_SAME_DEVICE syscall.Errno = 17 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_WRITE_PROTECT syscall.Errno = 19 + ERROR_BAD_UNIT syscall.Errno = 20 + ERROR_NOT_READY syscall.Errno = 21 + ERROR_BAD_COMMAND syscall.Errno = 22 + ERROR_CRC syscall.Errno = 23 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_SEEK syscall.Errno = 25 + ERROR_NOT_DOS_DISK syscall.Errno = 26 + ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 + ERROR_OUT_OF_PAPER syscall.Errno = 28 + ERROR_WRITE_FAULT syscall.Errno = 29 + ERROR_READ_FAULT syscall.Errno = 30 + ERROR_GEN_FAILURE syscall.Errno = 31 + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_WRONG_DISK syscall.Errno = 34 + ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_HANDLE_DISK_FULL syscall.Errno = 39 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_REM_NOT_LIST syscall.Errno = 51 + ERROR_DUP_NAME syscall.Errno = 52 + ERROR_BAD_NETPATH syscall.Errno = 53 + ERROR_NETWORK_BUSY syscall.Errno = 54 + ERROR_DEV_NOT_EXIST syscall.Errno = 55 + ERROR_TOO_MANY_CMDS syscall.Errno = 56 + ERROR_ADAP_HDW_ERR syscall.Errno = 57 + ERROR_BAD_NET_RESP syscall.Errno = 58 + ERROR_UNEXP_NET_ERR syscall.Errno = 59 + ERROR_BAD_REM_ADAP syscall.Errno = 60 + ERROR_PRINTQ_FULL syscall.Errno = 61 + ERROR_NO_SPOOL_SPACE syscall.Errno = 62 + ERROR_PRINT_CANCELLED syscall.Errno = 63 + ERROR_NETNAME_DELETED 
syscall.Errno = 64 + ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 + ERROR_BAD_DEV_TYPE syscall.Errno = 66 + ERROR_BAD_NET_NAME syscall.Errno = 67 + ERROR_TOO_MANY_NAMES syscall.Errno = 68 + ERROR_TOO_MANY_SESS syscall.Errno = 69 + ERROR_SHARING_PAUSED syscall.Errno = 70 + ERROR_REQ_NOT_ACCEP syscall.Errno = 71 + ERROR_REDIR_PAUSED syscall.Errno = 72 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_CANNOT_MAKE syscall.Errno = 82 + ERROR_FAIL_I24 syscall.Errno = 83 + ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 + ERROR_ALREADY_ASSIGNED syscall.Errno = 85 + ERROR_INVALID_PASSWORD syscall.Errno = 86 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_NET_WRITE_FAULT syscall.Errno = 88 + ERROR_NO_PROC_SLOTS syscall.Errno = 89 + ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 + ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 + ERROR_SEM_IS_SET syscall.Errno = 102 + ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 + ERROR_INVALID_AT_INTERRUPT_TIME syscall.Errno = 104 + ERROR_SEM_OWNER_DIED syscall.Errno = 105 + ERROR_SEM_USER_LIMIT syscall.Errno = 106 + ERROR_DISK_CHANGE syscall.Errno = 107 + ERROR_DRIVE_LOCKED syscall.Errno = 108 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_OPEN_FAILED syscall.Errno = 110 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_DISK_FULL syscall.Errno = 112 + ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 + ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 + ERROR_INVALID_CATEGORY syscall.Errno = 117 + ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 + ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_SEM_TIMEOUT syscall.Errno = 121 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_INVALID_LEVEL syscall.Errno = 124 + ERROR_NO_VOLUME_LABEL syscall.Errno = 125 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 + ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 + ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 + ERROR_NEGATIVE_SEEK syscall.Errno = 131 + ERROR_SEEK_ON_DEVICE syscall.Errno = 132 + ERROR_IS_JOIN_TARGET syscall.Errno = 133 + ERROR_IS_JOINED syscall.Errno = 134 + ERROR_IS_SUBSTED syscall.Errno = 135 + ERROR_NOT_JOINED syscall.Errno = 136 + ERROR_NOT_SUBSTED syscall.Errno = 137 + ERROR_JOIN_TO_JOIN syscall.Errno = 138 + ERROR_SUBST_TO_SUBST syscall.Errno = 139 + ERROR_JOIN_TO_SUBST syscall.Errno = 140 + ERROR_SUBST_TO_JOIN syscall.Errno = 141 + ERROR_BUSY_DRIVE syscall.Errno = 142 + ERROR_SAME_DRIVE syscall.Errno = 143 + ERROR_DIR_NOT_ROOT syscall.Errno = 144 + ERROR_DIR_NOT_EMPTY syscall.Errno = 145 + ERROR_IS_SUBST_PATH syscall.Errno = 146 + ERROR_IS_JOIN_PATH syscall.Errno = 147 + ERROR_PATH_BUSY syscall.Errno = 148 + ERROR_IS_SUBST_TARGET syscall.Errno = 149 + ERROR_SYSTEM_TRACE syscall.Errno = 150 + ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 + ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 + ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 + ERROR_LABEL_TOO_LONG syscall.Errno = 154 + ERROR_TOO_MANY_TCBS syscall.Errno = 155 + ERROR_SIGNAL_REFUSED syscall.Errno = 156 + ERROR_DISCARDED syscall.Errno = 157 + ERROR_NOT_LOCKED syscall.Errno = 158 + ERROR_BAD_THREADID_ADDR syscall.Errno = 159 + ERROR_BAD_ARGUMENTS syscall.Errno = 160 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_SIGNAL_PENDING syscall.Errno = 162 + ERROR_MAX_THRDS_REACHED syscall.Errno = 164 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_BUSY syscall.Errno = 170 + ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 + 
ERROR_CANCEL_VIOLATION syscall.Errno = 173 + ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 + ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 + ERROR_INVALID_ORDINAL syscall.Errno = 182 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 + ERROR_SEM_NOT_FOUND syscall.Errno = 187 + ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 + ERROR_INVALID_STACKSEG syscall.Errno = 189 + ERROR_INVALID_MODULETYPE syscall.Errno = 190 + ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 + ERROR_EXE_MARKED_INVALID syscall.Errno = 192 + ERROR_BAD_EXE_FORMAT syscall.Errno = 193 + ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 + ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 + ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 + ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 + ERROR_INVALID_SEGDPL syscall.Errno = 198 + ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 + ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 200 + ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 + ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_NO_SIGNAL_SENT syscall.Errno = 205 + ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 + ERROR_RING2_STACK_IN_USE syscall.Errno = 207 + ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 + ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 + ERROR_THREAD_1_INACTIVE syscall.Errno = 210 + ERROR_LOCKED syscall.Errno = 212 + ERROR_TOO_MANY_MODULES syscall.Errno = 214 + ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 + ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 + ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 + ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 + ERROR_FILE_CHECKED_OUT syscall.Errno = 220 + ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 + ERROR_BAD_FILE_TYPE syscall.Errno = 222 + ERROR_FILE_TOO_LARGE syscall.Errno = 223 + ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 + ERROR_VIRUS_INFECTED syscall.Errno = 225 + ERROR_VIRUS_DELETED syscall.Errno = 226 + ERROR_PIPE_LOCAL syscall.Errno = 229 + ERROR_BAD_PIPE syscall.Errno = 230 + ERROR_PIPE_BUSY syscall.Errno = 231 + ERROR_NO_DATA syscall.Errno = 232 + ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_NO_WORK_DONE syscall.Errno = 235 + ERROR_VC_DISCONNECTED syscall.Errno = 240 + ERROR_INVALID_EA_NAME syscall.Errno = 254 + ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 + WAIT_TIMEOUT syscall.Errno = 258 + ERROR_NO_MORE_ITEMS syscall.Errno = 259 + ERROR_CANNOT_COPY syscall.Errno = 266 + ERROR_DIRECTORY syscall.Errno = 267 + ERROR_EAS_DIDNT_FIT syscall.Errno = 275 + ERROR_EA_FILE_CORRUPT syscall.Errno = 276 + ERROR_EA_TABLE_FULL syscall.Errno = 277 + ERROR_INVALID_EA_HANDLE syscall.Errno = 278 + ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 + ERROR_NOT_OWNER syscall.Errno = 288 + ERROR_TOO_MANY_POSTS syscall.Errno = 298 + ERROR_PARTIAL_COPY syscall.Errno = 299 + ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 + ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 + ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 + ERROR_DELETE_PENDING syscall.Errno = 303 + ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 + ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 + ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 + ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 + ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 + ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 + ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 + 
ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311 + ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 + ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 + ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 + ERROR_INVALID_TOKEN syscall.Errno = 315 + ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 + ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 + ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 + ERROR_UNDEFINED_SCOPE syscall.Errno = 319 + ERROR_INVALID_CAP syscall.Errno = 320 + ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 + ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 + ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 + ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 + ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 + ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 + ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 + ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 + ERROR_BAD_DEVICE_PATH syscall.Errno = 330 + ERROR_TOO_MANY_DESCRIPTORS syscall.Errno = 331 + ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 + ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 + ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 + ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 + ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 + ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 + ERROR_FT_WRITE_FAILURE syscall.Errno = 338 + ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 + ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 + ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 + ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 + ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 + ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 + ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 + ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 + ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 + ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 + ERROR_ENCLAVE_FAILURE syscall.Errno = 349 + ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 + ERROR_FAIL_SHUTDOWN syscall.Errno = 351 + ERROR_FAIL_RESTART syscall.Errno = 352 + ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 + ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 + ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 + ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 + ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 + ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 + ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 + ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 + ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 + ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 + ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 + ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 + ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 + ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 + ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 + ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 + ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 + ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 + ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 + ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 + ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 + ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 + ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 + ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 + ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 + 
ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378 + ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 + ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 + ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 + ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 + ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 + ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 + ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 + ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 + ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 + ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 + ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 + ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 + ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 + ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 + ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 + ERROR_CLOUD_FILE_PROPERTY_CORRUPT syscall.Errno = 394 + ERROR_CLOUD_FILE_ACCESS_DENIED syscall.Errno = 395 + ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 + ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 + ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 + ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 + ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 + ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 + ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 + ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 + ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 + ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 + ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 + ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 + ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 + ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 + ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 + ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 + ERROR_FT_READ_FAILURE syscall.Errno = 415 + ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 + ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 + ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 + ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 + ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 + ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 + ERROR_TIME_CRITICAL_THREAD syscall.Errno = 422 + ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 + ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 + ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 + ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 + ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 + ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 + ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 + ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 + ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 + ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 + ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458 + ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 + ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 + ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 + ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 + ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 + ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 + ERROR_INVALID_ADDRESS syscall.Errno = 
487 + ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 + ERROR_PARTITION_TERMINATING syscall.Errno = 1184 + ERROR_USER_PROFILE_LOAD syscall.Errno = 500 + ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 + ERROR_PIPE_CONNECTED syscall.Errno = 535 + ERROR_PIPE_LISTENING syscall.Errno = 536 + ERROR_VERIFIER_STOP syscall.Errno = 537 + ERROR_ABIOS_ERROR syscall.Errno = 538 + ERROR_WX86_WARNING syscall.Errno = 539 + ERROR_WX86_ERROR syscall.Errno = 540 + ERROR_TIMER_NOT_CANCELED syscall.Errno = 541 + ERROR_UNWIND syscall.Errno = 542 + ERROR_BAD_STACK syscall.Errno = 543 + ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 + ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 + ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 + ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 + ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 + ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 + ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 + ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 + ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 + ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 + ERROR_CANT_WAIT syscall.Errno = 554 + ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 + ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 + ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 + ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 + ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 + ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 + ERROR_INVALID_LDT_SIZE syscall.Errno = 561 + ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 + ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 + ERROR_TOO_MANY_THREADS syscall.Errno = 565 + ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 + ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 + ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 + ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 + ERROR_NET_OPEN_FAILED syscall.Errno = 570 + ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 + ERROR_CONTROL_C_EXIT syscall.Errno = 572 + ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 + ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 + ERROR_APP_INIT_FAILURE syscall.Errno = 575 + ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 + ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 + ERROR_NO_PAGEFILE syscall.Errno = 578 + ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 + ERROR_NO_EVENT_PAIR syscall.Errno = 580 + ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 + ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 + ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 + ERROR_FLOPPY_VOLUME syscall.Errno = 584 + ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 + ERROR_BACKUP_CONTROLLER syscall.Errno = 586 + ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 + ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 + ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 + ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 + ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 + ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 + ERROR_VDM_HARD_ERROR syscall.Errno = 593 + ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 + ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 + ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 + ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597 + ERROR_NOT_TINY_STREAM syscall.Errno = 598 + ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 + ERROR_CONVERT_TO_LARGE syscall.Errno = 600 + ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 + ERROR_ALLOCATE_BUCKET syscall.Errno = 602 + ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 + ERROR_INVALID_VARIANT syscall.Errno = 604 + ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 + ERROR_AUDIT_FAILED syscall.Errno 
= 606 + ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 + ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 + ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 + ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 + ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 + ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 + ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 + ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 + ERROR_PWD_TOO_SHORT syscall.Errno = 615 + ERROR_PWD_TOO_RECENT syscall.Errno = 616 + ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 + ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 + ERROR_INVALID_HW_PROFILE syscall.Errno = 619 + ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 + ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 + ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 + ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 + ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 + ERROR_VALIDATE_CONTINUE syscall.Errno = 625 + ERROR_NO_MORE_MATCHES syscall.Errno = 626 + ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 + ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 + ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 + ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 + ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 + ERROR_NOINTERFACE syscall.Errno = 632 + ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 + ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 + ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 + ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 + ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 + ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 + ERROR_INSUFFICIENT_POWER syscall.Errno = 639 + ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 + ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 + ERROR_PORT_NOT_SET syscall.Errno = 642 + ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 + ERROR_RANGE_NOT_FOUND syscall.Errno = 644 + ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 + ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 + ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 + ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 + ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 + ERROR_MCA_OCCURED syscall.Errno = 651 + ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 + ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 + ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 + ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 + ERROR_HIBERNATION_FAILURE syscall.Errno = 656 + ERROR_PWD_TOO_LONG syscall.Errno = 657 + ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 + ERROR_ASSERTION_FAILURE syscall.Errno = 668 + ERROR_ACPI_ERROR syscall.Errno = 669 + ERROR_WOW_ASSERTION syscall.Errno = 670 + ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 + ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 + ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 + ERROR_PNP_INVALID_ID syscall.Errno = 674 + ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 + ERROR_HANDLES_CLOSED syscall.Errno = 676 + ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 + ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 + ERROR_MEDIA_CHECK syscall.Errno = 679 + ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 + ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 + ERROR_LONGJUMP syscall.Errno = 682 + ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 + ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 + ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 + ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 + ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 + ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 + ERROR_DBG_REPLY_LATER syscall.Errno = 689 + 
ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 + ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 + ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 + ERROR_DBG_CONTROL_C syscall.Errno = 693 + ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 + ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 + ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 + ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 + ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 + ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 + ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 + ERROR_RXACT_STATE_CREATED syscall.Errno = 701 + ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 + ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 + ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 + ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 + ERROR_RECEIVE_PARTIAL syscall.Errno = 707 + ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 + ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 709 + ERROR_EVENT_DONE syscall.Errno = 710 + ERROR_EVENT_PENDING syscall.Errno = 711 + ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712 + ERROR_FATAL_APP_EXIT syscall.Errno = 713 + ERROR_PREDEFINED_HANDLE syscall.Errno = 714 + ERROR_WAS_UNLOCKED syscall.Errno = 715 + ERROR_SERVICE_NOTIFICATION syscall.Errno = 716 + ERROR_WAS_LOCKED syscall.Errno = 717 + ERROR_LOG_HARD_ERROR syscall.Errno = 718 + ERROR_ALREADY_WIN32 syscall.Errno = 719 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720 + ERROR_NO_YIELD_PERFORMED syscall.Errno = 721 + ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722 + ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723 + ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724 + ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725 + ERROR_HIBERNATED syscall.Errno = 726 + ERROR_RESUME_HIBERNATION syscall.Errno = 727 + ERROR_FIRMWARE_UPDATED syscall.Errno = 728 + ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729 + ERROR_WAKE_SYSTEM syscall.Errno = 730 + ERROR_WAIT_1 syscall.Errno = 731 + ERROR_WAIT_2 syscall.Errno = 732 + ERROR_WAIT_3 syscall.Errno = 733 + ERROR_WAIT_63 syscall.Errno = 734 + ERROR_ABANDONED_WAIT_0 syscall.Errno = 735 + ERROR_ABANDONED_WAIT_63 syscall.Errno = 736 + ERROR_USER_APC syscall.Errno = 737 + ERROR_KERNEL_APC syscall.Errno = 738 + ERROR_ALERTED syscall.Errno = 739 + ERROR_ELEVATION_REQUIRED syscall.Errno = 740 + ERROR_REPARSE syscall.Errno = 741 + ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742 + ERROR_VOLUME_MOUNTED syscall.Errno = 743 + ERROR_RXACT_COMMITTED syscall.Errno = 744 + ERROR_NOTIFY_CLEANUP syscall.Errno = 745 + ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746 + ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747 + ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748 + ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749 + ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750 + ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751 + ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752 + ERROR_CRASH_DUMP syscall.Errno = 753 + ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754 + ERROR_REPARSE_OBJECT syscall.Errno = 755 + ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756 + ERROR_TRANSLATION_COMPLETE syscall.Errno = 757 + ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758 + ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759 + ERROR_PROCESS_IN_JOB syscall.Errno = 760 + ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761 + ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762 + ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763 + ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764 + ERROR_WAIT_FOR_OPLOCK 
syscall.Errno = 765 + ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766 + ERROR_DBG_CONTINUE syscall.Errno = 767 + ERROR_CALLBACK_POP_STACK syscall.Errno = 768 + ERROR_COMPRESSION_DISABLED syscall.Errno = 769 + ERROR_CANTFETCHBACKWARDS syscall.Errno = 770 + ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771 + ERROR_ROWSNOTRELEASED syscall.Errno = 772 + ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773 + ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774 + ERROR_NOT_CAPABLE syscall.Errno = 775 + ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776 + ERROR_VERSION_PARSE_ERROR syscall.Errno = 777 + ERROR_BADSTARTPOSITION syscall.Errno = 778 + ERROR_MEMORY_HARDWARE syscall.Errno = 779 + ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780 + ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781 + ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782 + ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783 + ERROR_MCA_EXCEPTION syscall.Errno = 784 + ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785 + ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786 + ERROR_ABANDON_HIBERFILE syscall.Errno = 787 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789 + ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790 + ERROR_BAD_MCFG_TABLE syscall.Errno = 791 + ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792 + ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793 + ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794 + ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795 + ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796 + ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797 + ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798 + ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799 + ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800 + ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801 + ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802 + ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803 + ERROR_NO_ACE_CONDITION syscall.Errno = 804 + ERROR_INVALID_ACE_CONDITION syscall.Errno = 805 + ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806 + ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807 + ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808 + ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809 + ERROR_QUOTA_ACTIVITY syscall.Errno = 810 + ERROR_HANDLE_REVOKED syscall.Errno = 811 + ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812 + ERROR_CPU_SET_INVALID syscall.Errno = 813 + ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814 + ERROR_ENCLAVE_VIOLATION syscall.Errno = 815 + ERROR_EA_ACCESS_DENIED syscall.Errno = 994 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_INCOMPLETE syscall.Errno = 996 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_NOACCESS syscall.Errno = 998 + ERROR_SWAPERROR syscall.Errno = 999 + ERROR_STACK_OVERFLOW syscall.Errno = 1001 + ERROR_INVALID_MESSAGE syscall.Errno = 1002 + ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003 + ERROR_INVALID_FLAGS syscall.Errno = 1004 + ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005 + ERROR_FILE_INVALID syscall.Errno = 1006 + ERROR_FULLSCREEN_MODE syscall.Errno = 1007 + ERROR_NO_TOKEN syscall.Errno = 1008 + ERROR_BADDB syscall.Errno = 1009 + ERROR_BADKEY syscall.Errno = 1010 + ERROR_CANTOPEN syscall.Errno = 1011 + ERROR_CANTREAD syscall.Errno = 1012 + ERROR_CANTWRITE syscall.Errno = 1013 + ERROR_REGISTRY_RECOVERED syscall.Errno = 1014 + ERROR_REGISTRY_CORRUPT syscall.Errno = 1015 + ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016 + ERROR_NOT_REGISTRY_FILE 
syscall.Errno = 1017 + ERROR_KEY_DELETED syscall.Errno = 1018 + ERROR_NO_LOG_SPACE syscall.Errno = 1019 + ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020 + ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021 + ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022 + ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051 + ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052 + ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053 + ERROR_SERVICE_NO_THREAD syscall.Errno = 1054 + ERROR_SERVICE_DATABASE_LOCKED syscall.Errno = 1055 + ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056 + ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057 + ERROR_SERVICE_DISABLED syscall.Errno = 1058 + ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059 + ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060 + ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061 + ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062 + ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063 + ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064 + ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_PROCESS_ABORTED syscall.Errno = 1067 + ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068 + ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069 + ERROR_SERVICE_START_HANG syscall.Errno = 1070 + ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071 + ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072 + ERROR_SERVICE_EXISTS syscall.Errno = 1073 + ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074 + ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075 + ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076 + ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077 + ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078 + ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079 + ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080 + ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081 + ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082 + ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083 + ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084 + ERROR_END_OF_MEDIA syscall.Errno = 1100 + ERROR_FILEMARK_DETECTED syscall.Errno = 1101 + ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102 + ERROR_SETMARK_DETECTED syscall.Errno = 1103 + ERROR_NO_DATA_DETECTED syscall.Errno = 1104 + ERROR_PARTITION_FAILURE syscall.Errno = 1105 + ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106 + ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107 + ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108 + ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109 + ERROR_MEDIA_CHANGED syscall.Errno = 1110 + ERROR_BUS_RESET syscall.Errno = 1111 + ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 + ERROR_DLL_INIT_FAILED syscall.Errno = 1114 + ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115 + ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116 + ERROR_IO_DEVICE syscall.Errno = 1117 + ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118 + ERROR_IRQ_BUSY syscall.Errno = 1119 + ERROR_MORE_WRITES syscall.Errno = 1120 + ERROR_COUNTER_TIMEOUT syscall.Errno = 1121 + ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122 + ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123 + ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124 + ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125 + ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126 + ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127 + ERROR_DISK_RESET_FAILED syscall.Errno = 1128 + ERROR_EOM_OVERFLOW syscall.Errno = 1129 + ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130 + ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131 + ERROR_MAPPED_ALIGNMENT 
syscall.Errno = 1132 + ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140 + ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141 + ERROR_TOO_MANY_LINKS syscall.Errno = 1142 + ERROR_OLD_WIN_VERSION syscall.Errno = 1150 + ERROR_APP_WRONG_OS syscall.Errno = 1151 + ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152 + ERROR_RMODE_APP syscall.Errno = 1153 + ERROR_INVALID_DLL syscall.Errno = 1154 + ERROR_NO_ASSOCIATION syscall.Errno = 1155 + ERROR_DDE_FAIL syscall.Errno = 1156 + ERROR_DLL_NOT_FOUND syscall.Errno = 1157 + ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158 + ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159 + ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160 + ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161 + ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162 + ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163 + ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164 + ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165 + ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166 + ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_NO_MATCH syscall.Errno = 1169 + ERROR_SET_NOT_FOUND syscall.Errno = 1170 + ERROR_POINT_NOT_FOUND syscall.Errno = 1171 + ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172 + ERROR_NO_VOLUME_ID syscall.Errno = 1173 + ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175 + ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176 + ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177 + ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178 + ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179 + ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180 + ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181 + ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190 + ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191 + ERROR_BAD_DEVICE syscall.Errno = 1200 + ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201 + ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202 + ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203 + ERROR_BAD_PROVIDER syscall.Errno = 1204 + ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205 + ERROR_BAD_PROFILE syscall.Errno = 1206 + ERROR_NOT_CONTAINER syscall.Errno = 1207 + ERROR_EXTENDED_ERROR syscall.Errno = 1208 + ERROR_INVALID_GROUPNAME syscall.Errno = 1209 + ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210 + ERROR_INVALID_EVENTNAME syscall.Errno = 1211 + ERROR_INVALID_DOMAINNAME syscall.Errno = 1212 + ERROR_INVALID_SERVICENAME syscall.Errno = 1213 + ERROR_INVALID_NETNAME syscall.Errno = 1214 + ERROR_INVALID_SHARENAME syscall.Errno = 1215 + ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216 + ERROR_INVALID_MESSAGENAME syscall.Errno = 1217 + ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218 + ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219 + ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220 + ERROR_DUP_DOMAINNAME syscall.Errno = 1221 + ERROR_NO_NETWORK syscall.Errno = 1222 + ERROR_CANCELLED syscall.Errno = 1223 + ERROR_USER_MAPPED_FILE syscall.Errno = 1224 + ERROR_CONNECTION_REFUSED syscall.Errno = 1225 + ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226 + ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227 + ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228 + ERROR_CONNECTION_INVALID syscall.Errno = 1229 + ERROR_CONNECTION_ACTIVE syscall.Errno = 1230 + ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231 + ERROR_HOST_UNREACHABLE syscall.Errno = 1232 + ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233 + ERROR_PORT_UNREACHABLE syscall.Errno = 1234 + ERROR_REQUEST_ABORTED syscall.Errno = 1235 + ERROR_CONNECTION_ABORTED syscall.Errno = 1236 + ERROR_RETRY syscall.Errno = 1237 
+ ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238 + ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239 + ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240 + ERROR_INCORRECT_ADDRESS syscall.Errno = 1241 + ERROR_ALREADY_REGISTERED syscall.Errno = 1242 + ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243 + ERROR_NOT_AUTHENTICATED syscall.Errno = 1244 + ERROR_NOT_LOGGED_ON syscall.Errno = 1245 + ERROR_CONTINUE syscall.Errno = 1246 + ERROR_ALREADY_INITIALIZED syscall.Errno = 1247 + ERROR_NO_MORE_DEVICES syscall.Errno = 1248 + ERROR_NO_SUCH_SITE syscall.Errno = 1249 + ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250 + ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251 + ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252 + ERROR_BAD_USER_PROFILE syscall.Errno = 1253 + ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254 + ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255 + ERROR_HOST_DOWN syscall.Errno = 1256 + ERROR_NON_ACCOUNT_SID syscall.Errno = 1257 + ERROR_NON_DOMAIN_SID syscall.Errno = 1258 + ERROR_APPHELP_BLOCK syscall.Errno = 1259 + ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260 + ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261 + ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262 + ERROR_PKINIT_FAILURE syscall.Errno = 1263 + ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264 + ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265 + ERROR_MACHINE_LOCKED syscall.Errno = 1271 + ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272 + ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273 + ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274 + ERROR_DRIVER_BLOCKED syscall.Errno = 1275 + ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276 + ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277 + ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278 + ERROR_RECOVERY_FAILURE syscall.Errno = 1279 + ERROR_ALREADY_FIBER syscall.Errno = 1280 + ERROR_ALREADY_THREAD syscall.Errno = 1281 + ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282 + ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283 + ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284 + ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285 + ERROR_VDM_DISALLOWED syscall.Errno = 1286 + ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287 + ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288 + ERROR_BEYOND_VDL syscall.Errno = 1289 + ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290 + ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291 + ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292 + ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293 + ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294 + ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295 + ERROR_CONTENT_BLOCKED syscall.Errno = 1296 + ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297 + ERROR_APP_HANG syscall.Errno = 1298 + ERROR_INVALID_LABEL syscall.Errno = 1299 + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + ERROR_SOME_NOT_MAPPED syscall.Errno = 1301 + ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302 + ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303 + ERROR_NULL_LM_PASSWORD syscall.Errno = 1304 + ERROR_UNKNOWN_REVISION syscall.Errno = 1305 + ERROR_REVISION_MISMATCH syscall.Errno = 1306 + ERROR_INVALID_OWNER syscall.Errno = 1307 + ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308 + ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309 + ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310 + ERROR_NO_LOGON_SERVERS syscall.Errno = 1311 + ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312 + ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + ERROR_INVALID_ACCOUNT_NAME 
syscall.Errno = 1315 + ERROR_USER_EXISTS syscall.Errno = 1316 + ERROR_NO_SUCH_USER syscall.Errno = 1317 + ERROR_GROUP_EXISTS syscall.Errno = 1318 + ERROR_NO_SUCH_GROUP syscall.Errno = 1319 + ERROR_MEMBER_IN_GROUP syscall.Errno = 1320 + ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321 + ERROR_LAST_ADMIN syscall.Errno = 1322 + ERROR_WRONG_PASSWORD syscall.Errno = 1323 + ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324 + ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325 + ERROR_LOGON_FAILURE syscall.Errno = 1326 + ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327 + ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328 + ERROR_INVALID_WORKSTATION syscall.Errno = 1329 + ERROR_PASSWORD_EXPIRED syscall.Errno = 1330 + ERROR_ACCOUNT_DISABLED syscall.Errno = 1331 + ERROR_NONE_MAPPED syscall.Errno = 1332 + ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333 + ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334 + ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335 + ERROR_INVALID_ACL syscall.Errno = 1336 + ERROR_INVALID_SID syscall.Errno = 1337 + ERROR_INVALID_SECURITY_DESCR syscall.Errno = 1338 + ERROR_BAD_INHERITANCE_ACL syscall.Errno = 1340 + ERROR_SERVER_DISABLED syscall.Errno = 1341 + ERROR_SERVER_NOT_DISABLED syscall.Errno = 1342 + ERROR_INVALID_ID_AUTHORITY syscall.Errno = 1343 + ERROR_ALLOTTED_SPACE_EXCEEDED syscall.Errno = 1344 + ERROR_INVALID_GROUP_ATTRIBUTES syscall.Errno = 1345 + ERROR_BAD_IMPERSONATION_LEVEL syscall.Errno = 1346 + ERROR_CANT_OPEN_ANONYMOUS syscall.Errno = 1347 + ERROR_BAD_VALIDATION_CLASS syscall.Errno = 1348 + ERROR_BAD_TOKEN_TYPE syscall.Errno = 1349 + ERROR_NO_SECURITY_ON_OBJECT syscall.Errno = 1350 + ERROR_CANT_ACCESS_DOMAIN_INFO syscall.Errno = 1351 + ERROR_INVALID_SERVER_STATE syscall.Errno = 1352 + ERROR_INVALID_DOMAIN_STATE syscall.Errno = 1353 + ERROR_INVALID_DOMAIN_ROLE syscall.Errno = 1354 + ERROR_NO_SUCH_DOMAIN syscall.Errno = 1355 + ERROR_DOMAIN_EXISTS syscall.Errno = 1356 + ERROR_DOMAIN_LIMIT_EXCEEDED syscall.Errno = 1357 + ERROR_INTERNAL_DB_CORRUPTION syscall.Errno = 1358 + ERROR_INTERNAL_ERROR syscall.Errno = 1359 + ERROR_GENERIC_NOT_MAPPED syscall.Errno = 1360 + ERROR_BAD_DESCRIPTOR_FORMAT syscall.Errno = 1361 + ERROR_NOT_LOGON_PROCESS syscall.Errno = 1362 + ERROR_LOGON_SESSION_EXISTS syscall.Errno = 1363 + ERROR_NO_SUCH_PACKAGE syscall.Errno = 1364 + ERROR_BAD_LOGON_SESSION_STATE syscall.Errno = 1365 + ERROR_LOGON_SESSION_COLLISION syscall.Errno = 1366 + ERROR_INVALID_LOGON_TYPE syscall.Errno = 1367 + ERROR_CANNOT_IMPERSONATE syscall.Errno = 1368 + ERROR_RXACT_INVALID_STATE syscall.Errno = 1369 + ERROR_RXACT_COMMIT_FAILURE syscall.Errno = 1370 + ERROR_SPECIAL_ACCOUNT syscall.Errno = 1371 + ERROR_SPECIAL_GROUP syscall.Errno = 1372 + ERROR_SPECIAL_USER syscall.Errno = 1373 + ERROR_MEMBERS_PRIMARY_GROUP syscall.Errno = 1374 + ERROR_TOKEN_ALREADY_IN_USE syscall.Errno = 1375 + ERROR_NO_SUCH_ALIAS syscall.Errno = 1376 + ERROR_MEMBER_NOT_IN_ALIAS syscall.Errno = 1377 + ERROR_MEMBER_IN_ALIAS syscall.Errno = 1378 + ERROR_ALIAS_EXISTS syscall.Errno = 1379 + ERROR_LOGON_NOT_GRANTED syscall.Errno = 1380 + ERROR_TOO_MANY_SECRETS syscall.Errno = 1381 + ERROR_SECRET_TOO_LONG syscall.Errno = 1382 + ERROR_INTERNAL_DB_ERROR syscall.Errno = 1383 + ERROR_TOO_MANY_CONTEXT_IDS syscall.Errno = 1384 + ERROR_LOGON_TYPE_NOT_GRANTED syscall.Errno = 1385 + ERROR_NT_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1386 + ERROR_NO_SUCH_MEMBER syscall.Errno = 1387 + ERROR_INVALID_MEMBER syscall.Errno = 1388 + ERROR_TOO_MANY_SIDS syscall.Errno = 1389 + ERROR_LM_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1390 + 
ERROR_NO_INHERITANCE syscall.Errno = 1391 + ERROR_FILE_CORRUPT syscall.Errno = 1392 + ERROR_DISK_CORRUPT syscall.Errno = 1393 + ERROR_NO_USER_SESSION_KEY syscall.Errno = 1394 + ERROR_LICENSE_QUOTA_EXCEEDED syscall.Errno = 1395 + ERROR_WRONG_TARGET_NAME syscall.Errno = 1396 + ERROR_MUTUAL_AUTH_FAILED syscall.Errno = 1397 + ERROR_TIME_SKEW syscall.Errno = 1398 + ERROR_CURRENT_DOMAIN_NOT_ALLOWED syscall.Errno = 1399 + ERROR_INVALID_WINDOW_HANDLE syscall.Errno = 1400 + ERROR_INVALID_MENU_HANDLE syscall.Errno = 1401 + ERROR_INVALID_CURSOR_HANDLE syscall.Errno = 1402 + ERROR_INVALID_ACCEL_HANDLE syscall.Errno = 1403 + ERROR_INVALID_HOOK_HANDLE syscall.Errno = 1404 + ERROR_INVALID_DWP_HANDLE syscall.Errno = 1405 + ERROR_TLW_WITH_WSCHILD syscall.Errno = 1406 + ERROR_CANNOT_FIND_WND_CLASS syscall.Errno = 1407 + ERROR_WINDOW_OF_OTHER_THREAD syscall.Errno = 1408 + ERROR_HOTKEY_ALREADY_REGISTERED syscall.Errno = 1409 + ERROR_CLASS_ALREADY_EXISTS syscall.Errno = 1410 + ERROR_CLASS_DOES_NOT_EXIST syscall.Errno = 1411 + ERROR_CLASS_HAS_WINDOWS syscall.Errno = 1412 + ERROR_INVALID_INDEX syscall.Errno = 1413 + ERROR_INVALID_ICON_HANDLE syscall.Errno = 1414 + ERROR_PRIVATE_DIALOG_INDEX syscall.Errno = 1415 + ERROR_LISTBOX_ID_NOT_FOUND syscall.Errno = 1416 + ERROR_NO_WILDCARD_CHARACTERS syscall.Errno = 1417 + ERROR_CLIPBOARD_NOT_OPEN syscall.Errno = 1418 + ERROR_HOTKEY_NOT_REGISTERED syscall.Errno = 1419 + ERROR_WINDOW_NOT_DIALOG syscall.Errno = 1420 + ERROR_CONTROL_ID_NOT_FOUND syscall.Errno = 1421 + ERROR_INVALID_COMBOBOX_MESSAGE syscall.Errno = 1422 + ERROR_WINDOW_NOT_COMBOBOX syscall.Errno = 1423 + ERROR_INVALID_EDIT_HEIGHT syscall.Errno = 1424 + ERROR_DC_NOT_FOUND syscall.Errno = 1425 + ERROR_INVALID_HOOK_FILTER syscall.Errno = 1426 + ERROR_INVALID_FILTER_PROC syscall.Errno = 1427 + ERROR_HOOK_NEEDS_HMOD syscall.Errno = 1428 + ERROR_GLOBAL_ONLY_HOOK syscall.Errno = 1429 + ERROR_JOURNAL_HOOK_SET syscall.Errno = 1430 + ERROR_HOOK_NOT_INSTALLED syscall.Errno = 1431 + ERROR_INVALID_LB_MESSAGE syscall.Errno = 1432 + ERROR_SETCOUNT_ON_BAD_LB syscall.Errno = 1433 + ERROR_LB_WITHOUT_TABSTOPS syscall.Errno = 1434 + ERROR_DESTROY_OBJECT_OF_OTHER_THREAD syscall.Errno = 1435 + ERROR_CHILD_WINDOW_MENU syscall.Errno = 1436 + ERROR_NO_SYSTEM_MENU syscall.Errno = 1437 + ERROR_INVALID_MSGBOX_STYLE syscall.Errno = 1438 + ERROR_INVALID_SPI_VALUE syscall.Errno = 1439 + ERROR_SCREEN_ALREADY_LOCKED syscall.Errno = 1440 + ERROR_HWNDS_HAVE_DIFF_PARENT syscall.Errno = 1441 + ERROR_NOT_CHILD_WINDOW syscall.Errno = 1442 + ERROR_INVALID_GW_COMMAND syscall.Errno = 1443 + ERROR_INVALID_THREAD_ID syscall.Errno = 1444 + ERROR_NON_MDICHILD_WINDOW syscall.Errno = 1445 + ERROR_POPUP_ALREADY_ACTIVE syscall.Errno = 1446 + ERROR_NO_SCROLLBARS syscall.Errno = 1447 + ERROR_INVALID_SCROLLBAR_RANGE syscall.Errno = 1448 + ERROR_INVALID_SHOWWIN_COMMAND syscall.Errno = 1449 + ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450 + ERROR_NONPAGED_SYSTEM_RESOURCES syscall.Errno = 1451 + ERROR_PAGED_SYSTEM_RESOURCES syscall.Errno = 1452 + ERROR_WORKING_SET_QUOTA syscall.Errno = 1453 + ERROR_PAGEFILE_QUOTA syscall.Errno = 1454 + ERROR_COMMITMENT_LIMIT syscall.Errno = 1455 + ERROR_MENU_ITEM_NOT_FOUND syscall.Errno = 1456 + ERROR_INVALID_KEYBOARD_HANDLE syscall.Errno = 1457 + ERROR_HOOK_TYPE_NOT_ALLOWED syscall.Errno = 1458 + ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION syscall.Errno = 1459 + ERROR_TIMEOUT syscall.Errno = 1460 + ERROR_INVALID_MONITOR_HANDLE syscall.Errno = 1461 + ERROR_INCORRECT_SIZE syscall.Errno = 1462 + ERROR_SYMLINK_CLASS_DISABLED 
syscall.Errno = 1463 + ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464 + ERROR_XML_PARSE_ERROR syscall.Errno = 1465 + ERROR_XMLDSIG_ERROR syscall.Errno = 1466 + ERROR_RESTART_APPLICATION syscall.Errno = 1467 + ERROR_WRONG_COMPARTMENT syscall.Errno = 1468 + ERROR_AUTHIP_FAILURE syscall.Errno = 1469 + ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470 + ERROR_NOT_GUI_PROCESS syscall.Errno = 1471 + ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500 + ERROR_EVENTLOG_CANT_START syscall.Errno = 1501 + ERROR_LOG_FILE_FULL syscall.Errno = 1502 + ERROR_EVENTLOG_FILE_CHANGED syscall.Errno = 1503 + ERROR_CONTAINER_ASSIGNED syscall.Errno = 1504 + ERROR_JOB_NO_CONTAINER syscall.Errno = 1505 + ERROR_INVALID_TASK_NAME syscall.Errno = 1550 + ERROR_INVALID_TASK_INDEX syscall.Errno = 1551 + ERROR_THREAD_ALREADY_IN_TASK syscall.Errno = 1552 + ERROR_INSTALL_SERVICE_FAILURE syscall.Errno = 1601 + ERROR_INSTALL_USEREXIT syscall.Errno = 1602 + ERROR_INSTALL_FAILURE syscall.Errno = 1603 + ERROR_INSTALL_SUSPEND syscall.Errno = 1604 + ERROR_UNKNOWN_PRODUCT syscall.Errno = 1605 + ERROR_UNKNOWN_FEATURE syscall.Errno = 1606 + ERROR_UNKNOWN_COMPONENT syscall.Errno = 1607 + ERROR_UNKNOWN_PROPERTY syscall.Errno = 1608 + ERROR_INVALID_HANDLE_STATE syscall.Errno = 1609 + ERROR_BAD_CONFIGURATION syscall.Errno = 1610 + ERROR_INDEX_ABSENT syscall.Errno = 1611 + ERROR_INSTALL_SOURCE_ABSENT syscall.Errno = 1612 + ERROR_INSTALL_PACKAGE_VERSION syscall.Errno = 1613 + ERROR_PRODUCT_UNINSTALLED syscall.Errno = 1614 + ERROR_BAD_QUERY_SYNTAX syscall.Errno = 1615 + ERROR_INVALID_FIELD syscall.Errno = 1616 + ERROR_DEVICE_REMOVED syscall.Errno = 1617 + ERROR_INSTALL_ALREADY_RUNNING syscall.Errno = 1618 + ERROR_INSTALL_PACKAGE_OPEN_FAILED syscall.Errno = 1619 + ERROR_INSTALL_PACKAGE_INVALID syscall.Errno = 1620 + ERROR_INSTALL_UI_FAILURE syscall.Errno = 1621 + ERROR_INSTALL_LOG_FAILURE syscall.Errno = 1622 + ERROR_INSTALL_LANGUAGE_UNSUPPORTED syscall.Errno = 1623 + ERROR_INSTALL_TRANSFORM_FAILURE syscall.Errno = 1624 + ERROR_INSTALL_PACKAGE_REJECTED syscall.Errno = 1625 + ERROR_FUNCTION_NOT_CALLED syscall.Errno = 1626 + ERROR_FUNCTION_FAILED syscall.Errno = 1627 + ERROR_INVALID_TABLE syscall.Errno = 1628 + ERROR_DATATYPE_MISMATCH syscall.Errno = 1629 + ERROR_UNSUPPORTED_TYPE syscall.Errno = 1630 + ERROR_CREATE_FAILED syscall.Errno = 1631 + ERROR_INSTALL_TEMP_UNWRITABLE syscall.Errno = 1632 + ERROR_INSTALL_PLATFORM_UNSUPPORTED syscall.Errno = 1633 + ERROR_INSTALL_NOTUSED syscall.Errno = 1634 + ERROR_PATCH_PACKAGE_OPEN_FAILED syscall.Errno = 1635 + ERROR_PATCH_PACKAGE_INVALID syscall.Errno = 1636 + ERROR_PATCH_PACKAGE_UNSUPPORTED syscall.Errno = 1637 + ERROR_PRODUCT_VERSION syscall.Errno = 1638 + ERROR_INVALID_COMMAND_LINE syscall.Errno = 1639 + ERROR_INSTALL_REMOTE_DISALLOWED syscall.Errno = 1640 + ERROR_SUCCESS_REBOOT_INITIATED syscall.Errno = 1641 + ERROR_PATCH_TARGET_NOT_FOUND syscall.Errno = 1642 + ERROR_PATCH_PACKAGE_REJECTED syscall.Errno = 1643 + ERROR_INSTALL_TRANSFORM_REJECTED syscall.Errno = 1644 + ERROR_INSTALL_REMOTE_PROHIBITED syscall.Errno = 1645 + ERROR_PATCH_REMOVAL_UNSUPPORTED syscall.Errno = 1646 + ERROR_UNKNOWN_PATCH syscall.Errno = 1647 + ERROR_PATCH_NO_SEQUENCE syscall.Errno = 1648 + ERROR_PATCH_REMOVAL_DISALLOWED syscall.Errno = 1649 + ERROR_INVALID_PATCH_XML syscall.Errno = 1650 + ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT syscall.Errno = 1651 + ERROR_INSTALL_SERVICE_SAFEBOOT syscall.Errno = 1652 + ERROR_FAIL_FAST_EXCEPTION syscall.Errno = 1653 + ERROR_INSTALL_REJECTED syscall.Errno = 1654 + ERROR_DYNAMIC_CODE_BLOCKED 
syscall.Errno = 1655 + ERROR_NOT_SAME_OBJECT syscall.Errno = 1656 + ERROR_STRICT_CFG_VIOLATION syscall.Errno = 1657 + ERROR_SET_CONTEXT_DENIED syscall.Errno = 1660 + ERROR_CROSS_PARTITION_VIOLATION syscall.Errno = 1661 + RPC_S_INVALID_STRING_BINDING syscall.Errno = 1700 + RPC_S_WRONG_KIND_OF_BINDING syscall.Errno = 1701 + RPC_S_INVALID_BINDING syscall.Errno = 1702 + RPC_S_PROTSEQ_NOT_SUPPORTED syscall.Errno = 1703 + RPC_S_INVALID_RPC_PROTSEQ syscall.Errno = 1704 + RPC_S_INVALID_STRING_UUID syscall.Errno = 1705 + RPC_S_INVALID_ENDPOINT_FORMAT syscall.Errno = 1706 + RPC_S_INVALID_NET_ADDR syscall.Errno = 1707 + RPC_S_NO_ENDPOINT_FOUND syscall.Errno = 1708 + RPC_S_INVALID_TIMEOUT syscall.Errno = 1709 + RPC_S_OBJECT_NOT_FOUND syscall.Errno = 1710 + RPC_S_ALREADY_REGISTERED syscall.Errno = 1711 + RPC_S_TYPE_ALREADY_REGISTERED syscall.Errno = 1712 + RPC_S_ALREADY_LISTENING syscall.Errno = 1713 + RPC_S_NO_PROTSEQS_REGISTERED syscall.Errno = 1714 + RPC_S_NOT_LISTENING syscall.Errno = 1715 + RPC_S_UNKNOWN_MGR_TYPE syscall.Errno = 1716 + RPC_S_UNKNOWN_IF syscall.Errno = 1717 + RPC_S_NO_BINDINGS syscall.Errno = 1718 + RPC_S_NO_PROTSEQS syscall.Errno = 1719 + RPC_S_CANT_CREATE_ENDPOINT syscall.Errno = 1720 + RPC_S_OUT_OF_RESOURCES syscall.Errno = 1721 + RPC_S_SERVER_UNAVAILABLE syscall.Errno = 1722 + RPC_S_SERVER_TOO_BUSY syscall.Errno = 1723 + RPC_S_INVALID_NETWORK_OPTIONS syscall.Errno = 1724 + RPC_S_NO_CALL_ACTIVE syscall.Errno = 1725 + RPC_S_CALL_FAILED syscall.Errno = 1726 + RPC_S_CALL_FAILED_DNE syscall.Errno = 1727 + RPC_S_PROTOCOL_ERROR syscall.Errno = 1728 + RPC_S_PROXY_ACCESS_DENIED syscall.Errno = 1729 + RPC_S_UNSUPPORTED_TRANS_SYN syscall.Errno = 1730 + RPC_S_UNSUPPORTED_TYPE syscall.Errno = 1732 + RPC_S_INVALID_TAG syscall.Errno = 1733 + RPC_S_INVALID_BOUND syscall.Errno = 1734 + RPC_S_NO_ENTRY_NAME syscall.Errno = 1735 + RPC_S_INVALID_NAME_SYNTAX syscall.Errno = 1736 + RPC_S_UNSUPPORTED_NAME_SYNTAX syscall.Errno = 1737 + RPC_S_UUID_NO_ADDRESS syscall.Errno = 1739 + RPC_S_DUPLICATE_ENDPOINT syscall.Errno = 1740 + RPC_S_UNKNOWN_AUTHN_TYPE syscall.Errno = 1741 + RPC_S_MAX_CALLS_TOO_SMALL syscall.Errno = 1742 + RPC_S_STRING_TOO_LONG syscall.Errno = 1743 + RPC_S_PROTSEQ_NOT_FOUND syscall.Errno = 1744 + RPC_S_PROCNUM_OUT_OF_RANGE syscall.Errno = 1745 + RPC_S_BINDING_HAS_NO_AUTH syscall.Errno = 1746 + RPC_S_UNKNOWN_AUTHN_SERVICE syscall.Errno = 1747 + RPC_S_UNKNOWN_AUTHN_LEVEL syscall.Errno = 1748 + RPC_S_INVALID_AUTH_IDENTITY syscall.Errno = 1749 + RPC_S_UNKNOWN_AUTHZ_SERVICE syscall.Errno = 1750 + EPT_S_INVALID_ENTRY syscall.Errno = 1751 + EPT_S_CANT_PERFORM_OP syscall.Errno = 1752 + EPT_S_NOT_REGISTERED syscall.Errno = 1753 + RPC_S_NOTHING_TO_EXPORT syscall.Errno = 1754 + RPC_S_INCOMPLETE_NAME syscall.Errno = 1755 + RPC_S_INVALID_VERS_OPTION syscall.Errno = 1756 + RPC_S_NO_MORE_MEMBERS syscall.Errno = 1757 + RPC_S_NOT_ALL_OBJS_UNEXPORTED syscall.Errno = 1758 + RPC_S_INTERFACE_NOT_FOUND syscall.Errno = 1759 + RPC_S_ENTRY_ALREADY_EXISTS syscall.Errno = 1760 + RPC_S_ENTRY_NOT_FOUND syscall.Errno = 1761 + RPC_S_NAME_SERVICE_UNAVAILABLE syscall.Errno = 1762 + RPC_S_INVALID_NAF_ID syscall.Errno = 1763 + RPC_S_CANNOT_SUPPORT syscall.Errno = 1764 + RPC_S_NO_CONTEXT_AVAILABLE syscall.Errno = 1765 + RPC_S_INTERNAL_ERROR syscall.Errno = 1766 + RPC_S_ZERO_DIVIDE syscall.Errno = 1767 + RPC_S_ADDRESS_ERROR syscall.Errno = 1768 + RPC_S_FP_DIV_ZERO syscall.Errno = 1769 + RPC_S_FP_UNDERFLOW syscall.Errno = 1770 + RPC_S_FP_OVERFLOW syscall.Errno = 1771 + RPC_X_NO_MORE_ENTRIES syscall.Errno = 1772 + 
RPC_X_SS_CHAR_TRANS_OPEN_FAIL syscall.Errno = 1773 + RPC_X_SS_CHAR_TRANS_SHORT_FILE syscall.Errno = 1774 + RPC_X_SS_IN_NULL_CONTEXT syscall.Errno = 1775 + RPC_X_SS_CONTEXT_DAMAGED syscall.Errno = 1777 + RPC_X_SS_HANDLES_MISMATCH syscall.Errno = 1778 + RPC_X_SS_CANNOT_GET_CALL_HANDLE syscall.Errno = 1779 + RPC_X_NULL_REF_POINTER syscall.Errno = 1780 + RPC_X_ENUM_VALUE_OUT_OF_RANGE syscall.Errno = 1781 + RPC_X_BYTE_COUNT_TOO_SMALL syscall.Errno = 1782 + RPC_X_BAD_STUB_DATA syscall.Errno = 1783 + ERROR_INVALID_USER_BUFFER syscall.Errno = 1784 + ERROR_UNRECOGNIZED_MEDIA syscall.Errno = 1785 + ERROR_NO_TRUST_LSA_SECRET syscall.Errno = 1786 + ERROR_NO_TRUST_SAM_ACCOUNT syscall.Errno = 1787 + ERROR_TRUSTED_DOMAIN_FAILURE syscall.Errno = 1788 + ERROR_TRUSTED_RELATIONSHIP_FAILURE syscall.Errno = 1789 + ERROR_TRUST_FAILURE syscall.Errno = 1790 + RPC_S_CALL_IN_PROGRESS syscall.Errno = 1791 + ERROR_NETLOGON_NOT_STARTED syscall.Errno = 1792 + ERROR_ACCOUNT_EXPIRED syscall.Errno = 1793 + ERROR_REDIRECTOR_HAS_OPEN_HANDLES syscall.Errno = 1794 + ERROR_PRINTER_DRIVER_ALREADY_INSTALLED syscall.Errno = 1795 + ERROR_UNKNOWN_PORT syscall.Errno = 1796 + ERROR_UNKNOWN_PRINTER_DRIVER syscall.Errno = 1797 + ERROR_UNKNOWN_PRINTPROCESSOR syscall.Errno = 1798 + ERROR_INVALID_SEPARATOR_FILE syscall.Errno = 1799 + ERROR_INVALID_PRIORITY syscall.Errno = 1800 + ERROR_INVALID_PRINTER_NAME syscall.Errno = 1801 + ERROR_PRINTER_ALREADY_EXISTS syscall.Errno = 1802 + ERROR_INVALID_PRINTER_COMMAND syscall.Errno = 1803 + ERROR_INVALID_DATATYPE syscall.Errno = 1804 + ERROR_INVALID_ENVIRONMENT syscall.Errno = 1805 + RPC_S_NO_MORE_BINDINGS syscall.Errno = 1806 + ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT syscall.Errno = 1807 + ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT syscall.Errno = 1808 + ERROR_NOLOGON_SERVER_TRUST_ACCOUNT syscall.Errno = 1809 + ERROR_DOMAIN_TRUST_INCONSISTENT syscall.Errno = 1810 + ERROR_SERVER_HAS_OPEN_HANDLES syscall.Errno = 1811 + ERROR_RESOURCE_DATA_NOT_FOUND syscall.Errno = 1812 + ERROR_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 1813 + ERROR_RESOURCE_NAME_NOT_FOUND syscall.Errno = 1814 + ERROR_RESOURCE_LANG_NOT_FOUND syscall.Errno = 1815 + ERROR_NOT_ENOUGH_QUOTA syscall.Errno = 1816 + RPC_S_NO_INTERFACES syscall.Errno = 1817 + RPC_S_CALL_CANCELLED syscall.Errno = 1818 + RPC_S_BINDING_INCOMPLETE syscall.Errno = 1819 + RPC_S_COMM_FAILURE syscall.Errno = 1820 + RPC_S_UNSUPPORTED_AUTHN_LEVEL syscall.Errno = 1821 + RPC_S_NO_PRINC_NAME syscall.Errno = 1822 + RPC_S_NOT_RPC_ERROR syscall.Errno = 1823 + RPC_S_UUID_LOCAL_ONLY syscall.Errno = 1824 + RPC_S_SEC_PKG_ERROR syscall.Errno = 1825 + RPC_S_NOT_CANCELLED syscall.Errno = 1826 + RPC_X_INVALID_ES_ACTION syscall.Errno = 1827 + RPC_X_WRONG_ES_VERSION syscall.Errno = 1828 + RPC_X_WRONG_STUB_VERSION syscall.Errno = 1829 + RPC_X_INVALID_PIPE_OBJECT syscall.Errno = 1830 + RPC_X_WRONG_PIPE_ORDER syscall.Errno = 1831 + RPC_X_WRONG_PIPE_VERSION syscall.Errno = 1832 + RPC_S_COOKIE_AUTH_FAILED syscall.Errno = 1833 + RPC_S_DO_NOT_DISTURB syscall.Errno = 1834 + RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED syscall.Errno = 1835 + RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH syscall.Errno = 1836 + RPC_S_GROUP_MEMBER_NOT_FOUND syscall.Errno = 1898 + EPT_S_CANT_CREATE syscall.Errno = 1899 + RPC_S_INVALID_OBJECT syscall.Errno = 1900 + ERROR_INVALID_TIME syscall.Errno = 1901 + ERROR_INVALID_FORM_NAME syscall.Errno = 1902 + ERROR_INVALID_FORM_SIZE syscall.Errno = 1903 + ERROR_ALREADY_WAITING syscall.Errno = 1904 + ERROR_PRINTER_DELETED syscall.Errno = 1905 + ERROR_INVALID_PRINTER_STATE syscall.Errno = 1906 + 
ERROR_PASSWORD_MUST_CHANGE syscall.Errno = 1907 + ERROR_DOMAIN_CONTROLLER_NOT_FOUND syscall.Errno = 1908 + ERROR_ACCOUNT_LOCKED_OUT syscall.Errno = 1909 + OR_INVALID_OXID syscall.Errno = 1910 + OR_INVALID_OID syscall.Errno = 1911 + OR_INVALID_SET syscall.Errno = 1912 + RPC_S_SEND_INCOMPLETE syscall.Errno = 1913 + RPC_S_INVALID_ASYNC_HANDLE syscall.Errno = 1914 + RPC_S_INVALID_ASYNC_CALL syscall.Errno = 1915 + RPC_X_PIPE_CLOSED syscall.Errno = 1916 + RPC_X_PIPE_DISCIPLINE_ERROR syscall.Errno = 1917 + RPC_X_PIPE_EMPTY syscall.Errno = 1918 + ERROR_NO_SITENAME syscall.Errno = 1919 + ERROR_CANT_ACCESS_FILE syscall.Errno = 1920 + ERROR_CANT_RESOLVE_FILENAME syscall.Errno = 1921 + RPC_S_ENTRY_TYPE_MISMATCH syscall.Errno = 1922 + RPC_S_NOT_ALL_OBJS_EXPORTED syscall.Errno = 1923 + RPC_S_INTERFACE_NOT_EXPORTED syscall.Errno = 1924 + RPC_S_PROFILE_NOT_ADDED syscall.Errno = 1925 + RPC_S_PRF_ELT_NOT_ADDED syscall.Errno = 1926 + RPC_S_PRF_ELT_NOT_REMOVED syscall.Errno = 1927 + RPC_S_GRP_ELT_NOT_ADDED syscall.Errno = 1928 + RPC_S_GRP_ELT_NOT_REMOVED syscall.Errno = 1929 + ERROR_KM_DRIVER_BLOCKED syscall.Errno = 1930 + ERROR_CONTEXT_EXPIRED syscall.Errno = 1931 + ERROR_PER_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1932 + ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1933 + ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED syscall.Errno = 1934 + ERROR_AUTHENTICATION_FIREWALL_FAILED syscall.Errno = 1935 + ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED syscall.Errno = 1936 + ERROR_NTLM_BLOCKED syscall.Errno = 1937 + ERROR_PASSWORD_CHANGE_REQUIRED syscall.Errno = 1938 + ERROR_LOST_MODE_LOGON_RESTRICTION syscall.Errno = 1939 + ERROR_INVALID_PIXEL_FORMAT syscall.Errno = 2000 + ERROR_BAD_DRIVER syscall.Errno = 2001 + ERROR_INVALID_WINDOW_STYLE syscall.Errno = 2002 + ERROR_METAFILE_NOT_SUPPORTED syscall.Errno = 2003 + ERROR_TRANSFORM_NOT_SUPPORTED syscall.Errno = 2004 + ERROR_CLIPPING_NOT_SUPPORTED syscall.Errno = 2005 + ERROR_INVALID_CMM syscall.Errno = 2010 + ERROR_INVALID_PROFILE syscall.Errno = 2011 + ERROR_TAG_NOT_FOUND syscall.Errno = 2012 + ERROR_TAG_NOT_PRESENT syscall.Errno = 2013 + ERROR_DUPLICATE_TAG syscall.Errno = 2014 + ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE syscall.Errno = 2015 + ERROR_PROFILE_NOT_FOUND syscall.Errno = 2016 + ERROR_INVALID_COLORSPACE syscall.Errno = 2017 + ERROR_ICM_NOT_ENABLED syscall.Errno = 2018 + ERROR_DELETING_ICM_XFORM syscall.Errno = 2019 + ERROR_INVALID_TRANSFORM syscall.Errno = 2020 + ERROR_COLORSPACE_MISMATCH syscall.Errno = 2021 + ERROR_INVALID_COLORINDEX syscall.Errno = 2022 + ERROR_PROFILE_DOES_NOT_MATCH_DEVICE syscall.Errno = 2023 + ERROR_CONNECTED_OTHER_PASSWORD syscall.Errno = 2108 + ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT syscall.Errno = 2109 + ERROR_BAD_USERNAME syscall.Errno = 2202 + ERROR_NOT_CONNECTED syscall.Errno = 2250 + ERROR_OPEN_FILES syscall.Errno = 2401 + ERROR_ACTIVE_CONNECTIONS syscall.Errno = 2402 + ERROR_DEVICE_IN_USE syscall.Errno = 2404 + ERROR_UNKNOWN_PRINT_MONITOR syscall.Errno = 3000 + ERROR_PRINTER_DRIVER_IN_USE syscall.Errno = 3001 + ERROR_SPOOL_FILE_NOT_FOUND syscall.Errno = 3002 + ERROR_SPL_NO_STARTDOC syscall.Errno = 3003 + ERROR_SPL_NO_ADDJOB syscall.Errno = 3004 + ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED syscall.Errno = 3005 + ERROR_PRINT_MONITOR_ALREADY_INSTALLED syscall.Errno = 3006 + ERROR_INVALID_PRINT_MONITOR syscall.Errno = 3007 + ERROR_PRINT_MONITOR_IN_USE syscall.Errno = 3008 + ERROR_PRINTER_HAS_JOBS_QUEUED syscall.Errno = 3009 + ERROR_SUCCESS_REBOOT_REQUIRED syscall.Errno = 3010 + ERROR_SUCCESS_RESTART_REQUIRED syscall.Errno = 3011 + 
ERROR_PRINTER_NOT_FOUND syscall.Errno = 3012 + ERROR_PRINTER_DRIVER_WARNED syscall.Errno = 3013 + ERROR_PRINTER_DRIVER_BLOCKED syscall.Errno = 3014 + ERROR_PRINTER_DRIVER_PACKAGE_IN_USE syscall.Errno = 3015 + ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND syscall.Errno = 3016 + ERROR_FAIL_REBOOT_REQUIRED syscall.Errno = 3017 + ERROR_FAIL_REBOOT_INITIATED syscall.Errno = 3018 + ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED syscall.Errno = 3019 + ERROR_PRINT_JOB_RESTART_REQUIRED syscall.Errno = 3020 + ERROR_INVALID_PRINTER_DRIVER_MANIFEST syscall.Errno = 3021 + ERROR_PRINTER_NOT_SHAREABLE syscall.Errno = 3022 + ERROR_REQUEST_PAUSED syscall.Errno = 3050 + ERROR_APPEXEC_CONDITION_NOT_SATISFIED syscall.Errno = 3060 + ERROR_APPEXEC_HANDLE_INVALIDATED syscall.Errno = 3061 + ERROR_APPEXEC_INVALID_HOST_GENERATION syscall.Errno = 3062 + ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION syscall.Errno = 3063 + ERROR_APPEXEC_INVALID_HOST_STATE syscall.Errno = 3064 + ERROR_APPEXEC_NO_DONOR syscall.Errno = 3065 + ERROR_APPEXEC_HOST_ID_MISMATCH syscall.Errno = 3066 + ERROR_APPEXEC_UNKNOWN_USER syscall.Errno = 3067 + ERROR_IO_REISSUE_AS_CACHED syscall.Errno = 3950 + ERROR_WINS_INTERNAL syscall.Errno = 4000 + ERROR_CAN_NOT_DEL_LOCAL_WINS syscall.Errno = 4001 + ERROR_STATIC_INIT syscall.Errno = 4002 + ERROR_INC_BACKUP syscall.Errno = 4003 + ERROR_FULL_BACKUP syscall.Errno = 4004 + ERROR_REC_NON_EXISTENT syscall.Errno = 4005 + ERROR_RPL_NOT_ALLOWED syscall.Errno = 4006 + PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED syscall.Errno = 4050 + PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO syscall.Errno = 4051 + PEERDIST_ERROR_MISSING_DATA syscall.Errno = 4052 + PEERDIST_ERROR_NO_MORE syscall.Errno = 4053 + PEERDIST_ERROR_NOT_INITIALIZED syscall.Errno = 4054 + PEERDIST_ERROR_ALREADY_INITIALIZED syscall.Errno = 4055 + PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 4056 + PEERDIST_ERROR_INVALIDATED syscall.Errno = 4057 + PEERDIST_ERROR_ALREADY_EXISTS syscall.Errno = 4058 + PEERDIST_ERROR_OPERATION_NOTFOUND syscall.Errno = 4059 + PEERDIST_ERROR_ALREADY_COMPLETED syscall.Errno = 4060 + PEERDIST_ERROR_OUT_OF_BOUNDS syscall.Errno = 4061 + PEERDIST_ERROR_VERSION_UNSUPPORTED syscall.Errno = 4062 + PEERDIST_ERROR_INVALID_CONFIGURATION syscall.Errno = 4063 + PEERDIST_ERROR_NOT_LICENSED syscall.Errno = 4064 + PEERDIST_ERROR_SERVICE_UNAVAILABLE syscall.Errno = 4065 + PEERDIST_ERROR_TRUST_FAILURE syscall.Errno = 4066 + ERROR_DHCP_ADDRESS_CONFLICT syscall.Errno = 4100 + ERROR_WMI_GUID_NOT_FOUND syscall.Errno = 4200 + ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201 + ERROR_WMI_ITEMID_NOT_FOUND syscall.Errno = 4202 + ERROR_WMI_TRY_AGAIN syscall.Errno = 4203 + ERROR_WMI_DP_NOT_FOUND syscall.Errno = 4204 + ERROR_WMI_UNRESOLVED_INSTANCE_REF syscall.Errno = 4205 + ERROR_WMI_ALREADY_ENABLED syscall.Errno = 4206 + ERROR_WMI_GUID_DISCONNECTED syscall.Errno = 4207 + ERROR_WMI_SERVER_UNAVAILABLE syscall.Errno = 4208 + ERROR_WMI_DP_FAILED syscall.Errno = 4209 + ERROR_WMI_INVALID_MOF syscall.Errno = 4210 + ERROR_WMI_INVALID_REGINFO syscall.Errno = 4211 + ERROR_WMI_ALREADY_DISABLED syscall.Errno = 4212 + ERROR_WMI_READ_ONLY syscall.Errno = 4213 + ERROR_WMI_SET_FAILURE syscall.Errno = 4214 + ERROR_NOT_APPCONTAINER syscall.Errno = 4250 + ERROR_APPCONTAINER_REQUIRED syscall.Errno = 4251 + ERROR_NOT_SUPPORTED_IN_APPCONTAINER syscall.Errno = 4252 + ERROR_INVALID_PACKAGE_SID_LENGTH syscall.Errno = 4253 + ERROR_INVALID_MEDIA syscall.Errno = 4300 + ERROR_INVALID_LIBRARY syscall.Errno = 4301 + ERROR_INVALID_MEDIA_POOL syscall.Errno = 4302 + ERROR_DRIVE_MEDIA_MISMATCH 
syscall.Errno = 4303 + ERROR_MEDIA_OFFLINE syscall.Errno = 4304 + ERROR_LIBRARY_OFFLINE syscall.Errno = 4305 + ERROR_EMPTY syscall.Errno = 4306 + ERROR_NOT_EMPTY syscall.Errno = 4307 + ERROR_MEDIA_UNAVAILABLE syscall.Errno = 4308 + ERROR_RESOURCE_DISABLED syscall.Errno = 4309 + ERROR_INVALID_CLEANER syscall.Errno = 4310 + ERROR_UNABLE_TO_CLEAN syscall.Errno = 4311 + ERROR_OBJECT_NOT_FOUND syscall.Errno = 4312 + ERROR_DATABASE_FAILURE syscall.Errno = 4313 + ERROR_DATABASE_FULL syscall.Errno = 4314 + ERROR_MEDIA_INCOMPATIBLE syscall.Errno = 4315 + ERROR_RESOURCE_NOT_PRESENT syscall.Errno = 4316 + ERROR_INVALID_OPERATION syscall.Errno = 4317 + ERROR_MEDIA_NOT_AVAILABLE syscall.Errno = 4318 + ERROR_DEVICE_NOT_AVAILABLE syscall.Errno = 4319 + ERROR_REQUEST_REFUSED syscall.Errno = 4320 + ERROR_INVALID_DRIVE_OBJECT syscall.Errno = 4321 + ERROR_LIBRARY_FULL syscall.Errno = 4322 + ERROR_MEDIUM_NOT_ACCESSIBLE syscall.Errno = 4323 + ERROR_UNABLE_TO_LOAD_MEDIUM syscall.Errno = 4324 + ERROR_UNABLE_TO_INVENTORY_DRIVE syscall.Errno = 4325 + ERROR_UNABLE_TO_INVENTORY_SLOT syscall.Errno = 4326 + ERROR_UNABLE_TO_INVENTORY_TRANSPORT syscall.Errno = 4327 + ERROR_TRANSPORT_FULL syscall.Errno = 4328 + ERROR_CONTROLLING_IEPORT syscall.Errno = 4329 + ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA syscall.Errno = 4330 + ERROR_CLEANER_SLOT_SET syscall.Errno = 4331 + ERROR_CLEANER_SLOT_NOT_SET syscall.Errno = 4332 + ERROR_CLEANER_CARTRIDGE_SPENT syscall.Errno = 4333 + ERROR_UNEXPECTED_OMID syscall.Errno = 4334 + ERROR_CANT_DELETE_LAST_ITEM syscall.Errno = 4335 + ERROR_MESSAGE_EXCEEDS_MAX_SIZE syscall.Errno = 4336 + ERROR_VOLUME_CONTAINS_SYS_FILES syscall.Errno = 4337 + ERROR_INDIGENOUS_TYPE syscall.Errno = 4338 + ERROR_NO_SUPPORTING_DRIVES syscall.Errno = 4339 + ERROR_CLEANER_CARTRIDGE_INSTALLED syscall.Errno = 4340 + ERROR_IEPORT_FULL syscall.Errno = 4341 + ERROR_FILE_OFFLINE syscall.Errno = 4350 + ERROR_REMOTE_STORAGE_NOT_ACTIVE syscall.Errno = 4351 + ERROR_REMOTE_STORAGE_MEDIA_ERROR syscall.Errno = 4352 + ERROR_NOT_A_REPARSE_POINT syscall.Errno = 4390 + ERROR_REPARSE_ATTRIBUTE_CONFLICT syscall.Errno = 4391 + ERROR_INVALID_REPARSE_DATA syscall.Errno = 4392 + ERROR_REPARSE_TAG_INVALID syscall.Errno = 4393 + ERROR_REPARSE_TAG_MISMATCH syscall.Errno = 4394 + ERROR_REPARSE_POINT_ENCOUNTERED syscall.Errno = 4395 + ERROR_APP_DATA_NOT_FOUND syscall.Errno = 4400 + ERROR_APP_DATA_EXPIRED syscall.Errno = 4401 + ERROR_APP_DATA_CORRUPT syscall.Errno = 4402 + ERROR_APP_DATA_LIMIT_EXCEEDED syscall.Errno = 4403 + ERROR_APP_DATA_REBOOT_REQUIRED syscall.Errno = 4404 + ERROR_SECUREBOOT_ROLLBACK_DETECTED syscall.Errno = 4420 + ERROR_SECUREBOOT_POLICY_VIOLATION syscall.Errno = 4421 + ERROR_SECUREBOOT_INVALID_POLICY syscall.Errno = 4422 + ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND syscall.Errno = 4423 + ERROR_SECUREBOOT_POLICY_NOT_SIGNED syscall.Errno = 4424 + ERROR_SECUREBOOT_NOT_ENABLED syscall.Errno = 4425 + ERROR_SECUREBOOT_FILE_REPLACED syscall.Errno = 4426 + ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED syscall.Errno = 4427 + ERROR_SECUREBOOT_POLICY_UNKNOWN syscall.Errno = 4428 + ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION syscall.Errno = 4429 + ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH syscall.Errno = 4430 + ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED syscall.Errno = 4431 + ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH syscall.Errno = 4432 + ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING syscall.Errno = 4433 + ERROR_SECUREBOOT_NOT_BASE_POLICY syscall.Errno = 4434 + ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY syscall.Errno = 4435 + 
ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED syscall.Errno = 4440 + ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED syscall.Errno = 4441 + ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED syscall.Errno = 4442 + ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED syscall.Errno = 4443 + ERROR_ALREADY_HAS_STREAM_ID syscall.Errno = 4444 + ERROR_SMR_GARBAGE_COLLECTION_REQUIRED syscall.Errno = 4445 + ERROR_WOF_WIM_HEADER_CORRUPT syscall.Errno = 4446 + ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT syscall.Errno = 4447 + ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT syscall.Errno = 4448 + ERROR_VOLUME_NOT_SIS_ENABLED syscall.Errno = 4500 + ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED syscall.Errno = 4550 + ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION syscall.Errno = 4551 + ERROR_SYSTEM_INTEGRITY_INVALID_POLICY syscall.Errno = 4552 + ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED syscall.Errno = 4553 + ERROR_VSM_NOT_INITIALIZED syscall.Errno = 4560 + ERROR_VSM_DMA_PROTECTION_NOT_IN_USE syscall.Errno = 4561 + ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED syscall.Errno = 4570 + ERROR_PLATFORM_MANIFEST_INVALID syscall.Errno = 4571 + ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED syscall.Errno = 4572 + ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED syscall.Errno = 4573 + ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND syscall.Errno = 4574 + ERROR_PLATFORM_MANIFEST_NOT_ACTIVE syscall.Errno = 4575 + ERROR_PLATFORM_MANIFEST_NOT_SIGNED syscall.Errno = 4576 + ERROR_DEPENDENT_RESOURCE_EXISTS syscall.Errno = 5001 + ERROR_DEPENDENCY_NOT_FOUND syscall.Errno = 5002 + ERROR_DEPENDENCY_ALREADY_EXISTS syscall.Errno = 5003 + ERROR_RESOURCE_NOT_ONLINE syscall.Errno = 5004 + ERROR_HOST_NODE_NOT_AVAILABLE syscall.Errno = 5005 + ERROR_RESOURCE_NOT_AVAILABLE syscall.Errno = 5006 + ERROR_RESOURCE_NOT_FOUND syscall.Errno = 5007 + ERROR_SHUTDOWN_CLUSTER syscall.Errno = 5008 + ERROR_CANT_EVICT_ACTIVE_NODE syscall.Errno = 5009 + ERROR_OBJECT_ALREADY_EXISTS syscall.Errno = 5010 + ERROR_OBJECT_IN_LIST syscall.Errno = 5011 + ERROR_GROUP_NOT_AVAILABLE syscall.Errno = 5012 + ERROR_GROUP_NOT_FOUND syscall.Errno = 5013 + ERROR_GROUP_NOT_ONLINE syscall.Errno = 5014 + ERROR_HOST_NODE_NOT_RESOURCE_OWNER syscall.Errno = 5015 + ERROR_HOST_NODE_NOT_GROUP_OWNER syscall.Errno = 5016 + ERROR_RESMON_CREATE_FAILED syscall.Errno = 5017 + ERROR_RESMON_ONLINE_FAILED syscall.Errno = 5018 + ERROR_RESOURCE_ONLINE syscall.Errno = 5019 + ERROR_QUORUM_RESOURCE syscall.Errno = 5020 + ERROR_NOT_QUORUM_CAPABLE syscall.Errno = 5021 + ERROR_CLUSTER_SHUTTING_DOWN syscall.Errno = 5022 + ERROR_INVALID_STATE syscall.Errno = 5023 + ERROR_RESOURCE_PROPERTIES_STORED syscall.Errno = 5024 + ERROR_NOT_QUORUM_CLASS syscall.Errno = 5025 + ERROR_CORE_RESOURCE syscall.Errno = 5026 + ERROR_QUORUM_RESOURCE_ONLINE_FAILED syscall.Errno = 5027 + ERROR_QUORUMLOG_OPEN_FAILED syscall.Errno = 5028 + ERROR_CLUSTERLOG_CORRUPT syscall.Errno = 5029 + ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE syscall.Errno = 5030 + ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE syscall.Errno = 5031 + ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND syscall.Errno = 5032 + ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE syscall.Errno = 5033 + ERROR_QUORUM_OWNER_ALIVE syscall.Errno = 5034 + ERROR_NETWORK_NOT_AVAILABLE syscall.Errno = 5035 + ERROR_NODE_NOT_AVAILABLE syscall.Errno = 5036 + ERROR_ALL_NODES_NOT_AVAILABLE syscall.Errno = 5037 + ERROR_RESOURCE_FAILED syscall.Errno = 5038 + ERROR_CLUSTER_INVALID_NODE syscall.Errno = 5039 + ERROR_CLUSTER_NODE_EXISTS syscall.Errno = 5040 + ERROR_CLUSTER_JOIN_IN_PROGRESS syscall.Errno = 5041 + ERROR_CLUSTER_NODE_NOT_FOUND syscall.Errno = 5042 + ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND syscall.Errno 
= 5043 + ERROR_CLUSTER_NETWORK_EXISTS syscall.Errno = 5044 + ERROR_CLUSTER_NETWORK_NOT_FOUND syscall.Errno = 5045 + ERROR_CLUSTER_NETINTERFACE_EXISTS syscall.Errno = 5046 + ERROR_CLUSTER_NETINTERFACE_NOT_FOUND syscall.Errno = 5047 + ERROR_CLUSTER_INVALID_REQUEST syscall.Errno = 5048 + ERROR_CLUSTER_INVALID_NETWORK_PROVIDER syscall.Errno = 5049 + ERROR_CLUSTER_NODE_DOWN syscall.Errno = 5050 + ERROR_CLUSTER_NODE_UNREACHABLE syscall.Errno = 5051 + ERROR_CLUSTER_NODE_NOT_MEMBER syscall.Errno = 5052 + ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS syscall.Errno = 5053 + ERROR_CLUSTER_INVALID_NETWORK syscall.Errno = 5054 + ERROR_CLUSTER_NODE_UP syscall.Errno = 5056 + ERROR_CLUSTER_IPADDR_IN_USE syscall.Errno = 5057 + ERROR_CLUSTER_NODE_NOT_PAUSED syscall.Errno = 5058 + ERROR_CLUSTER_NO_SECURITY_CONTEXT syscall.Errno = 5059 + ERROR_CLUSTER_NETWORK_NOT_INTERNAL syscall.Errno = 5060 + ERROR_CLUSTER_NODE_ALREADY_UP syscall.Errno = 5061 + ERROR_CLUSTER_NODE_ALREADY_DOWN syscall.Errno = 5062 + ERROR_CLUSTER_NETWORK_ALREADY_ONLINE syscall.Errno = 5063 + ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE syscall.Errno = 5064 + ERROR_CLUSTER_NODE_ALREADY_MEMBER syscall.Errno = 5065 + ERROR_CLUSTER_LAST_INTERNAL_NETWORK syscall.Errno = 5066 + ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS syscall.Errno = 5067 + ERROR_INVALID_OPERATION_ON_QUORUM syscall.Errno = 5068 + ERROR_DEPENDENCY_NOT_ALLOWED syscall.Errno = 5069 + ERROR_CLUSTER_NODE_PAUSED syscall.Errno = 5070 + ERROR_NODE_CANT_HOST_RESOURCE syscall.Errno = 5071 + ERROR_CLUSTER_NODE_NOT_READY syscall.Errno = 5072 + ERROR_CLUSTER_NODE_SHUTTING_DOWN syscall.Errno = 5073 + ERROR_CLUSTER_JOIN_ABORTED syscall.Errno = 5074 + ERROR_CLUSTER_INCOMPATIBLE_VERSIONS syscall.Errno = 5075 + ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED syscall.Errno = 5076 + ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED syscall.Errno = 5077 + ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 5078 + ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED syscall.Errno = 5079 + ERROR_CLUSTER_RESNAME_NOT_FOUND syscall.Errno = 5080 + ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED syscall.Errno = 5081 + ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST syscall.Errno = 5082 + ERROR_CLUSTER_DATABASE_SEQMISMATCH syscall.Errno = 5083 + ERROR_RESMON_INVALID_STATE syscall.Errno = 5084 + ERROR_CLUSTER_GUM_NOT_LOCKER syscall.Errno = 5085 + ERROR_QUORUM_DISK_NOT_FOUND syscall.Errno = 5086 + ERROR_DATABASE_BACKUP_CORRUPT syscall.Errno = 5087 + ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT syscall.Errno = 5088 + ERROR_RESOURCE_PROPERTY_UNCHANGEABLE syscall.Errno = 5089 + ERROR_NO_ADMIN_ACCESS_POINT syscall.Errno = 5090 + ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE syscall.Errno = 5890 + ERROR_CLUSTER_QUORUMLOG_NOT_FOUND syscall.Errno = 5891 + ERROR_CLUSTER_MEMBERSHIP_HALT syscall.Errno = 5892 + ERROR_CLUSTER_INSTANCE_ID_MISMATCH syscall.Errno = 5893 + ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP syscall.Errno = 5894 + ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH syscall.Errno = 5895 + ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP syscall.Errno = 5896 + ERROR_CLUSTER_PARAMETER_MISMATCH syscall.Errno = 5897 + ERROR_NODE_CANNOT_BE_CLUSTERED syscall.Errno = 5898 + ERROR_CLUSTER_WRONG_OS_VERSION syscall.Errno = 5899 + ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME syscall.Errno = 5900 + ERROR_CLUSCFG_ALREADY_COMMITTED syscall.Errno = 5901 + ERROR_CLUSCFG_ROLLBACK_FAILED syscall.Errno = 5902 + ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT syscall.Errno = 5903 + ERROR_CLUSTER_OLD_VERSION syscall.Errno = 5904 + ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME syscall.Errno = 5905 + ERROR_CLUSTER_NO_NET_ADAPTERS 
syscall.Errno = 5906 + ERROR_CLUSTER_POISONED syscall.Errno = 5907 + ERROR_CLUSTER_GROUP_MOVING syscall.Errno = 5908 + ERROR_CLUSTER_RESOURCE_TYPE_BUSY syscall.Errno = 5909 + ERROR_RESOURCE_CALL_TIMED_OUT syscall.Errno = 5910 + ERROR_INVALID_CLUSTER_IPV6_ADDRESS syscall.Errno = 5911 + ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION syscall.Errno = 5912 + ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS syscall.Errno = 5913 + ERROR_CLUSTER_PARTIAL_SEND syscall.Errno = 5914 + ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION syscall.Errno = 5915 + ERROR_CLUSTER_INVALID_STRING_TERMINATION syscall.Errno = 5916 + ERROR_CLUSTER_INVALID_STRING_FORMAT syscall.Errno = 5917 + ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS syscall.Errno = 5918 + ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS syscall.Errno = 5919 + ERROR_CLUSTER_NULL_DATA syscall.Errno = 5920 + ERROR_CLUSTER_PARTIAL_READ syscall.Errno = 5921 + ERROR_CLUSTER_PARTIAL_WRITE syscall.Errno = 5922 + ERROR_CLUSTER_CANT_DESERIALIZE_DATA syscall.Errno = 5923 + ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT syscall.Errno = 5924 + ERROR_CLUSTER_NO_QUORUM syscall.Errno = 5925 + ERROR_CLUSTER_INVALID_IPV6_NETWORK syscall.Errno = 5926 + ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK syscall.Errno = 5927 + ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP syscall.Errno = 5928 + ERROR_DEPENDENCY_TREE_TOO_COMPLEX syscall.Errno = 5929 + ERROR_EXCEPTION_IN_RESOURCE_CALL syscall.Errno = 5930 + ERROR_CLUSTER_RHS_FAILED_INITIALIZATION syscall.Errno = 5931 + ERROR_CLUSTER_NOT_INSTALLED syscall.Errno = 5932 + ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE syscall.Errno = 5933 + ERROR_CLUSTER_MAX_NODES_IN_CLUSTER syscall.Errno = 5934 + ERROR_CLUSTER_TOO_MANY_NODES syscall.Errno = 5935 + ERROR_CLUSTER_OBJECT_ALREADY_USED syscall.Errno = 5936 + ERROR_NONCORE_GROUPS_FOUND syscall.Errno = 5937 + ERROR_FILE_SHARE_RESOURCE_CONFLICT syscall.Errno = 5938 + ERROR_CLUSTER_EVICT_INVALID_REQUEST syscall.Errno = 5939 + ERROR_CLUSTER_SINGLETON_RESOURCE syscall.Errno = 5940 + ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE syscall.Errno = 5941 + ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED syscall.Errno = 5942 + ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR syscall.Errno = 5943 + ERROR_CLUSTER_GROUP_BUSY syscall.Errno = 5944 + ERROR_CLUSTER_NOT_SHARED_VOLUME syscall.Errno = 5945 + ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR syscall.Errno = 5946 + ERROR_CLUSTER_SHARED_VOLUMES_IN_USE syscall.Errno = 5947 + ERROR_CLUSTER_USE_SHARED_VOLUMES_API syscall.Errno = 5948 + ERROR_CLUSTER_BACKUP_IN_PROGRESS syscall.Errno = 5949 + ERROR_NON_CSV_PATH syscall.Errno = 5950 + ERROR_CSV_VOLUME_NOT_LOCAL syscall.Errno = 5951 + ERROR_CLUSTER_WATCHDOG_TERMINATING syscall.Errno = 5952 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES syscall.Errno = 5953 + ERROR_CLUSTER_INVALID_NODE_WEIGHT syscall.Errno = 5954 + ERROR_CLUSTER_RESOURCE_VETOED_CALL syscall.Errno = 5955 + ERROR_RESMON_SYSTEM_RESOURCES_LACKING syscall.Errno = 5956 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION syscall.Errno = 5957 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE syscall.Errno = 5958 + ERROR_CLUSTER_GROUP_QUEUED syscall.Errno = 5959 + ERROR_CLUSTER_RESOURCE_LOCKED_STATUS syscall.Errno = 5960 + ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED syscall.Errno = 5961 + ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS syscall.Errno = 5962 + ERROR_CLUSTER_DISK_NOT_CONNECTED syscall.Errno = 5963 + ERROR_DISK_NOT_CSV_CAPABLE syscall.Errno = 5964 + ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE syscall.Errno = 5965 + 
ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED syscall.Errno = 5966 + ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED syscall.Errno = 5967 + ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES syscall.Errno = 5968 + ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES syscall.Errno = 5969 + ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE syscall.Errno = 5970 + ERROR_CLUSTER_AFFINITY_CONFLICT syscall.Errno = 5971 + ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE syscall.Errno = 5972 + ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS syscall.Errno = 5973 + ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED syscall.Errno = 5974 + ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED syscall.Errno = 5975 + ERROR_CLUSTER_UPGRADE_IN_PROGRESS syscall.Errno = 5976 + ERROR_CLUSTER_UPGRADE_INCOMPLETE syscall.Errno = 5977 + ERROR_CLUSTER_NODE_IN_GRACE_PERIOD syscall.Errno = 5978 + ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT syscall.Errno = 5979 + ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER syscall.Errno = 5980 + ERROR_CLUSTER_RESOURCE_NOT_MONITORED syscall.Errno = 5981 + ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED syscall.Errno = 5982 + ERROR_CLUSTER_RESOURCE_IS_REPLICATED syscall.Errno = 5983 + ERROR_CLUSTER_NODE_ISOLATED syscall.Errno = 5984 + ERROR_CLUSTER_NODE_QUARANTINED syscall.Errno = 5985 + ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED syscall.Errno = 5986 + ERROR_CLUSTER_SPACE_DEGRADED syscall.Errno = 5987 + ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED syscall.Errno = 5988 + ERROR_CLUSTER_CSV_INVALID_HANDLE syscall.Errno = 5989 + ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR syscall.Errno = 5990 + ERROR_GROUPSET_NOT_AVAILABLE syscall.Errno = 5991 + ERROR_GROUPSET_NOT_FOUND syscall.Errno = 5992 + ERROR_GROUPSET_CANT_PROVIDE syscall.Errno = 5993 + ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND syscall.Errno = 5994 + ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY syscall.Errno = 5995 + ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION syscall.Errno = 5996 + ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS syscall.Errno = 5997 + ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME syscall.Errno = 5998 + ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE syscall.Errno = 5999 + ERROR_ENCRYPTION_FAILED syscall.Errno = 6000 + ERROR_DECRYPTION_FAILED syscall.Errno = 6001 + ERROR_FILE_ENCRYPTED syscall.Errno = 6002 + ERROR_NO_RECOVERY_POLICY syscall.Errno = 6003 + ERROR_NO_EFS syscall.Errno = 6004 + ERROR_WRONG_EFS syscall.Errno = 6005 + ERROR_NO_USER_KEYS syscall.Errno = 6006 + ERROR_FILE_NOT_ENCRYPTED syscall.Errno = 6007 + ERROR_NOT_EXPORT_FORMAT syscall.Errno = 6008 + ERROR_FILE_READ_ONLY syscall.Errno = 6009 + ERROR_DIR_EFS_DISALLOWED syscall.Errno = 6010 + ERROR_EFS_SERVER_NOT_TRUSTED syscall.Errno = 6011 + ERROR_BAD_RECOVERY_POLICY syscall.Errno = 6012 + ERROR_EFS_ALG_BLOB_TOO_BIG syscall.Errno = 6013 + ERROR_VOLUME_NOT_SUPPORT_EFS syscall.Errno = 6014 + ERROR_EFS_DISABLED syscall.Errno = 6015 + ERROR_EFS_VERSION_NOT_SUPPORT syscall.Errno = 6016 + ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE syscall.Errno = 6017 + ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER syscall.Errno = 6018 + ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE syscall.Errno = 6019 + ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE syscall.Errno = 6020 + ERROR_CS_ENCRYPTION_FILE_NOT_CSE syscall.Errno = 6021 + ERROR_ENCRYPTION_POLICY_DENIES_OPERATION syscall.Errno = 6022 + ERROR_NO_BROWSER_SERVERS_FOUND syscall.Errno = 6118 + SCHED_E_SERVICE_NOT_LOCALSYSTEM syscall.Errno = 6200 + ERROR_LOG_SECTOR_INVALID syscall.Errno = 6600 + ERROR_LOG_SECTOR_PARITY_INVALID syscall.Errno = 6601 + 
ERROR_LOG_SECTOR_REMAPPED syscall.Errno = 6602 + ERROR_LOG_BLOCK_INCOMPLETE syscall.Errno = 6603 + ERROR_LOG_INVALID_RANGE syscall.Errno = 6604 + ERROR_LOG_BLOCKS_EXHAUSTED syscall.Errno = 6605 + ERROR_LOG_READ_CONTEXT_INVALID syscall.Errno = 6606 + ERROR_LOG_RESTART_INVALID syscall.Errno = 6607 + ERROR_LOG_BLOCK_VERSION syscall.Errno = 6608 + ERROR_LOG_BLOCK_INVALID syscall.Errno = 6609 + ERROR_LOG_READ_MODE_INVALID syscall.Errno = 6610 + ERROR_LOG_NO_RESTART syscall.Errno = 6611 + ERROR_LOG_METADATA_CORRUPT syscall.Errno = 6612 + ERROR_LOG_METADATA_INVALID syscall.Errno = 6613 + ERROR_LOG_METADATA_INCONSISTENT syscall.Errno = 6614 + ERROR_LOG_RESERVATION_INVALID syscall.Errno = 6615 + ERROR_LOG_CANT_DELETE syscall.Errno = 6616 + ERROR_LOG_CONTAINER_LIMIT_EXCEEDED syscall.Errno = 6617 + ERROR_LOG_START_OF_LOG syscall.Errno = 6618 + ERROR_LOG_POLICY_ALREADY_INSTALLED syscall.Errno = 6619 + ERROR_LOG_POLICY_NOT_INSTALLED syscall.Errno = 6620 + ERROR_LOG_POLICY_INVALID syscall.Errno = 6621 + ERROR_LOG_POLICY_CONFLICT syscall.Errno = 6622 + ERROR_LOG_PINNED_ARCHIVE_TAIL syscall.Errno = 6623 + ERROR_LOG_RECORD_NONEXISTENT syscall.Errno = 6624 + ERROR_LOG_RECORDS_RESERVED_INVALID syscall.Errno = 6625 + ERROR_LOG_SPACE_RESERVED_INVALID syscall.Errno = 6626 + ERROR_LOG_TAIL_INVALID syscall.Errno = 6627 + ERROR_LOG_FULL syscall.Errno = 6628 + ERROR_COULD_NOT_RESIZE_LOG syscall.Errno = 6629 + ERROR_LOG_MULTIPLEXED syscall.Errno = 6630 + ERROR_LOG_DEDICATED syscall.Errno = 6631 + ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS syscall.Errno = 6632 + ERROR_LOG_ARCHIVE_IN_PROGRESS syscall.Errno = 6633 + ERROR_LOG_EPHEMERAL syscall.Errno = 6634 + ERROR_LOG_NOT_ENOUGH_CONTAINERS syscall.Errno = 6635 + ERROR_LOG_CLIENT_ALREADY_REGISTERED syscall.Errno = 6636 + ERROR_LOG_CLIENT_NOT_REGISTERED syscall.Errno = 6637 + ERROR_LOG_FULL_HANDLER_IN_PROGRESS syscall.Errno = 6638 + ERROR_LOG_CONTAINER_READ_FAILED syscall.Errno = 6639 + ERROR_LOG_CONTAINER_WRITE_FAILED syscall.Errno = 6640 + ERROR_LOG_CONTAINER_OPEN_FAILED syscall.Errno = 6641 + ERROR_LOG_CONTAINER_STATE_INVALID syscall.Errno = 6642 + ERROR_LOG_STATE_INVALID syscall.Errno = 6643 + ERROR_LOG_PINNED syscall.Errno = 6644 + ERROR_LOG_METADATA_FLUSH_FAILED syscall.Errno = 6645 + ERROR_LOG_INCONSISTENT_SECURITY syscall.Errno = 6646 + ERROR_LOG_APPENDED_FLUSH_FAILED syscall.Errno = 6647 + ERROR_LOG_PINNED_RESERVATION syscall.Errno = 6648 + ERROR_INVALID_TRANSACTION syscall.Errno = 6700 + ERROR_TRANSACTION_NOT_ACTIVE syscall.Errno = 6701 + ERROR_TRANSACTION_REQUEST_NOT_VALID syscall.Errno = 6702 + ERROR_TRANSACTION_NOT_REQUESTED syscall.Errno = 6703 + ERROR_TRANSACTION_ALREADY_ABORTED syscall.Errno = 6704 + ERROR_TRANSACTION_ALREADY_COMMITTED syscall.Errno = 6705 + ERROR_TM_INITIALIZATION_FAILED syscall.Errno = 6706 + ERROR_RESOURCEMANAGER_READ_ONLY syscall.Errno = 6707 + ERROR_TRANSACTION_NOT_JOINED syscall.Errno = 6708 + ERROR_TRANSACTION_SUPERIOR_EXISTS syscall.Errno = 6709 + ERROR_CRM_PROTOCOL_ALREADY_EXISTS syscall.Errno = 6710 + ERROR_TRANSACTION_PROPAGATION_FAILED syscall.Errno = 6711 + ERROR_CRM_PROTOCOL_NOT_FOUND syscall.Errno = 6712 + ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER syscall.Errno = 6713 + ERROR_CURRENT_TRANSACTION_NOT_VALID syscall.Errno = 6714 + ERROR_TRANSACTION_NOT_FOUND syscall.Errno = 6715 + ERROR_RESOURCEMANAGER_NOT_FOUND syscall.Errno = 6716 + ERROR_ENLISTMENT_NOT_FOUND syscall.Errno = 6717 + ERROR_TRANSACTIONMANAGER_NOT_FOUND syscall.Errno = 6718 + ERROR_TRANSACTIONMANAGER_NOT_ONLINE syscall.Errno = 6719 + 
ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION syscall.Errno = 6720 + ERROR_TRANSACTION_NOT_ROOT syscall.Errno = 6721 + ERROR_TRANSACTION_OBJECT_EXPIRED syscall.Errno = 6722 + ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED syscall.Errno = 6723 + ERROR_TRANSACTION_RECORD_TOO_LONG syscall.Errno = 6724 + ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED syscall.Errno = 6725 + ERROR_TRANSACTION_INTEGRITY_VIOLATED syscall.Errno = 6726 + ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH syscall.Errno = 6727 + ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT syscall.Errno = 6728 + ERROR_TRANSACTION_MUST_WRITETHROUGH syscall.Errno = 6729 + ERROR_TRANSACTION_NO_SUPERIOR syscall.Errno = 6730 + ERROR_HEURISTIC_DAMAGE_POSSIBLE syscall.Errno = 6731 + ERROR_TRANSACTIONAL_CONFLICT syscall.Errno = 6800 + ERROR_RM_NOT_ACTIVE syscall.Errno = 6801 + ERROR_RM_METADATA_CORRUPT syscall.Errno = 6802 + ERROR_DIRECTORY_NOT_RM syscall.Errno = 6803 + ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE syscall.Errno = 6805 + ERROR_LOG_RESIZE_INVALID_SIZE syscall.Errno = 6806 + ERROR_OBJECT_NO_LONGER_EXISTS syscall.Errno = 6807 + ERROR_STREAM_MINIVERSION_NOT_FOUND syscall.Errno = 6808 + ERROR_STREAM_MINIVERSION_NOT_VALID syscall.Errno = 6809 + ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION syscall.Errno = 6810 + ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT syscall.Errno = 6811 + ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS syscall.Errno = 6812 + ERROR_REMOTE_FILE_VERSION_MISMATCH syscall.Errno = 6814 + ERROR_HANDLE_NO_LONGER_VALID syscall.Errno = 6815 + ERROR_NO_TXF_METADATA syscall.Errno = 6816 + ERROR_LOG_CORRUPTION_DETECTED syscall.Errno = 6817 + ERROR_CANT_RECOVER_WITH_HANDLE_OPEN syscall.Errno = 6818 + ERROR_RM_DISCONNECTED syscall.Errno = 6819 + ERROR_ENLISTMENT_NOT_SUPERIOR syscall.Errno = 6820 + ERROR_RECOVERY_NOT_NEEDED syscall.Errno = 6821 + ERROR_RM_ALREADY_STARTED syscall.Errno = 6822 + ERROR_FILE_IDENTITY_NOT_PERSISTENT syscall.Errno = 6823 + ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY syscall.Errno = 6824 + ERROR_CANT_CROSS_RM_BOUNDARY syscall.Errno = 6825 + ERROR_TXF_DIR_NOT_EMPTY syscall.Errno = 6826 + ERROR_INDOUBT_TRANSACTIONS_EXIST syscall.Errno = 6827 + ERROR_TM_VOLATILE syscall.Errno = 6828 + ERROR_ROLLBACK_TIMER_EXPIRED syscall.Errno = 6829 + ERROR_TXF_ATTRIBUTE_CORRUPT syscall.Errno = 6830 + ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6831 + ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED syscall.Errno = 6832 + ERROR_LOG_GROWTH_FAILED syscall.Errno = 6833 + ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE syscall.Errno = 6834 + ERROR_TXF_METADATA_ALREADY_PRESENT syscall.Errno = 6835 + ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET syscall.Errno = 6836 + ERROR_TRANSACTION_REQUIRED_PROMOTION syscall.Errno = 6837 + ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION syscall.Errno = 6838 + ERROR_TRANSACTIONS_NOT_FROZEN syscall.Errno = 6839 + ERROR_TRANSACTION_FREEZE_IN_PROGRESS syscall.Errno = 6840 + ERROR_NOT_SNAPSHOT_VOLUME syscall.Errno = 6841 + ERROR_NO_SAVEPOINT_WITH_OPEN_FILES syscall.Errno = 6842 + ERROR_DATA_LOST_REPAIR syscall.Errno = 6843 + ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6844 + ERROR_TM_IDENTITY_MISMATCH syscall.Errno = 6845 + ERROR_FLOATED_SECTION syscall.Errno = 6846 + ERROR_CANNOT_ACCEPT_TRANSACTED_WORK syscall.Errno = 6847 + ERROR_CANNOT_ABORT_TRANSACTIONS syscall.Errno = 6848 + ERROR_BAD_CLUSTERS syscall.Errno = 6849 + ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6850 + ERROR_VOLUME_DIRTY syscall.Errno = 6851 + ERROR_NO_LINK_TRACKING_IN_TRANSACTION syscall.Errno = 6852 + 
ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION syscall.Errno = 6853 + ERROR_EXPIRED_HANDLE syscall.Errno = 6854 + ERROR_TRANSACTION_NOT_ENLISTED syscall.Errno = 6855 + ERROR_CTX_WINSTATION_NAME_INVALID syscall.Errno = 7001 + ERROR_CTX_INVALID_PD syscall.Errno = 7002 + ERROR_CTX_PD_NOT_FOUND syscall.Errno = 7003 + ERROR_CTX_WD_NOT_FOUND syscall.Errno = 7004 + ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY syscall.Errno = 7005 + ERROR_CTX_SERVICE_NAME_COLLISION syscall.Errno = 7006 + ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007 + ERROR_CTX_NO_OUTBUF syscall.Errno = 7008 + ERROR_CTX_MODEM_INF_NOT_FOUND syscall.Errno = 7009 + ERROR_CTX_INVALID_MODEMNAME syscall.Errno = 7010 + ERROR_CTX_MODEM_RESPONSE_ERROR syscall.Errno = 7011 + ERROR_CTX_MODEM_RESPONSE_TIMEOUT syscall.Errno = 7012 + ERROR_CTX_MODEM_RESPONSE_NO_CARRIER syscall.Errno = 7013 + ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE syscall.Errno = 7014 + ERROR_CTX_MODEM_RESPONSE_BUSY syscall.Errno = 7015 + ERROR_CTX_MODEM_RESPONSE_VOICE syscall.Errno = 7016 + ERROR_CTX_TD_ERROR syscall.Errno = 7017 + ERROR_CTX_WINSTATION_NOT_FOUND syscall.Errno = 7022 + ERROR_CTX_WINSTATION_ALREADY_EXISTS syscall.Errno = 7023 + ERROR_CTX_WINSTATION_BUSY syscall.Errno = 7024 + ERROR_CTX_BAD_VIDEO_MODE syscall.Errno = 7025 + ERROR_CTX_GRAPHICS_INVALID syscall.Errno = 7035 + ERROR_CTX_LOGON_DISABLED syscall.Errno = 7037 + ERROR_CTX_NOT_CONSOLE syscall.Errno = 7038 + ERROR_CTX_CLIENT_QUERY_TIMEOUT syscall.Errno = 7040 + ERROR_CTX_CONSOLE_DISCONNECT syscall.Errno = 7041 + ERROR_CTX_CONSOLE_CONNECT syscall.Errno = 7042 + ERROR_CTX_SHADOW_DENIED syscall.Errno = 7044 + ERROR_CTX_WINSTATION_ACCESS_DENIED syscall.Errno = 7045 + ERROR_CTX_INVALID_WD syscall.Errno = 7049 + ERROR_CTX_SHADOW_INVALID syscall.Errno = 7050 + ERROR_CTX_SHADOW_DISABLED syscall.Errno = 7051 + ERROR_CTX_CLIENT_LICENSE_IN_USE syscall.Errno = 7052 + ERROR_CTX_CLIENT_LICENSE_NOT_SET syscall.Errno = 7053 + ERROR_CTX_LICENSE_NOT_AVAILABLE syscall.Errno = 7054 + ERROR_CTX_LICENSE_CLIENT_INVALID syscall.Errno = 7055 + ERROR_CTX_LICENSE_EXPIRED syscall.Errno = 7056 + ERROR_CTX_SHADOW_NOT_RUNNING syscall.Errno = 7057 + ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE syscall.Errno = 7058 + ERROR_ACTIVATION_COUNT_EXCEEDED syscall.Errno = 7059 + ERROR_CTX_WINSTATIONS_DISABLED syscall.Errno = 7060 + ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED syscall.Errno = 7061 + ERROR_CTX_SESSION_IN_USE syscall.Errno = 7062 + ERROR_CTX_NO_FORCE_LOGOFF syscall.Errno = 7063 + ERROR_CTX_ACCOUNT_RESTRICTION syscall.Errno = 7064 + ERROR_RDP_PROTOCOL_ERROR syscall.Errno = 7065 + ERROR_CTX_CDM_CONNECT syscall.Errno = 7066 + ERROR_CTX_CDM_DISCONNECT syscall.Errno = 7067 + ERROR_CTX_SECURITY_LAYER_ERROR syscall.Errno = 7068 + ERROR_TS_INCOMPATIBLE_SESSIONS syscall.Errno = 7069 + ERROR_TS_VIDEO_SUBSYSTEM_ERROR syscall.Errno = 7070 + FRS_ERR_INVALID_API_SEQUENCE syscall.Errno = 8001 + FRS_ERR_STARTING_SERVICE syscall.Errno = 8002 + FRS_ERR_STOPPING_SERVICE syscall.Errno = 8003 + FRS_ERR_INTERNAL_API syscall.Errno = 8004 + FRS_ERR_INTERNAL syscall.Errno = 8005 + FRS_ERR_SERVICE_COMM syscall.Errno = 8006 + FRS_ERR_INSUFFICIENT_PRIV syscall.Errno = 8007 + FRS_ERR_AUTHENTICATION syscall.Errno = 8008 + FRS_ERR_PARENT_INSUFFICIENT_PRIV syscall.Errno = 8009 + FRS_ERR_PARENT_AUTHENTICATION syscall.Errno = 8010 + FRS_ERR_CHILD_TO_PARENT_COMM syscall.Errno = 8011 + FRS_ERR_PARENT_TO_CHILD_COMM syscall.Errno = 8012 + FRS_ERR_SYSVOL_POPULATE syscall.Errno = 8013 + FRS_ERR_SYSVOL_POPULATE_TIMEOUT syscall.Errno = 8014 + FRS_ERR_SYSVOL_IS_BUSY syscall.Errno = 8015 + 
FRS_ERR_SYSVOL_DEMOTE syscall.Errno = 8016 + FRS_ERR_INVALID_SERVICE_PARAMETER syscall.Errno = 8017 + DS_S_SUCCESS = ERROR_SUCCESS + ERROR_DS_NOT_INSTALLED syscall.Errno = 8200 + ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY syscall.Errno = 8201 + ERROR_DS_NO_ATTRIBUTE_OR_VALUE syscall.Errno = 8202 + ERROR_DS_INVALID_ATTRIBUTE_SYNTAX syscall.Errno = 8203 + ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED syscall.Errno = 8204 + ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS syscall.Errno = 8205 + ERROR_DS_BUSY syscall.Errno = 8206 + ERROR_DS_UNAVAILABLE syscall.Errno = 8207 + ERROR_DS_NO_RIDS_ALLOCATED syscall.Errno = 8208 + ERROR_DS_NO_MORE_RIDS syscall.Errno = 8209 + ERROR_DS_INCORRECT_ROLE_OWNER syscall.Errno = 8210 + ERROR_DS_RIDMGR_INIT_ERROR syscall.Errno = 8211 + ERROR_DS_OBJ_CLASS_VIOLATION syscall.Errno = 8212 + ERROR_DS_CANT_ON_NON_LEAF syscall.Errno = 8213 + ERROR_DS_CANT_ON_RDN syscall.Errno = 8214 + ERROR_DS_CANT_MOD_OBJ_CLASS syscall.Errno = 8215 + ERROR_DS_CROSS_DOM_MOVE_ERROR syscall.Errno = 8216 + ERROR_DS_GC_NOT_AVAILABLE syscall.Errno = 8217 + ERROR_SHARED_POLICY syscall.Errno = 8218 + ERROR_POLICY_OBJECT_NOT_FOUND syscall.Errno = 8219 + ERROR_POLICY_ONLY_IN_DS syscall.Errno = 8220 + ERROR_PROMOTION_ACTIVE syscall.Errno = 8221 + ERROR_NO_PROMOTION_ACTIVE syscall.Errno = 8222 + ERROR_DS_OPERATIONS_ERROR syscall.Errno = 8224 + ERROR_DS_PROTOCOL_ERROR syscall.Errno = 8225 + ERROR_DS_TIMELIMIT_EXCEEDED syscall.Errno = 8226 + ERROR_DS_SIZELIMIT_EXCEEDED syscall.Errno = 8227 + ERROR_DS_ADMIN_LIMIT_EXCEEDED syscall.Errno = 8228 + ERROR_DS_COMPARE_FALSE syscall.Errno = 8229 + ERROR_DS_COMPARE_TRUE syscall.Errno = 8230 + ERROR_DS_AUTH_METHOD_NOT_SUPPORTED syscall.Errno = 8231 + ERROR_DS_STRONG_AUTH_REQUIRED syscall.Errno = 8232 + ERROR_DS_INAPPROPRIATE_AUTH syscall.Errno = 8233 + ERROR_DS_AUTH_UNKNOWN syscall.Errno = 8234 + ERROR_DS_REFERRAL syscall.Errno = 8235 + ERROR_DS_UNAVAILABLE_CRIT_EXTENSION syscall.Errno = 8236 + ERROR_DS_CONFIDENTIALITY_REQUIRED syscall.Errno = 8237 + ERROR_DS_INAPPROPRIATE_MATCHING syscall.Errno = 8238 + ERROR_DS_CONSTRAINT_VIOLATION syscall.Errno = 8239 + ERROR_DS_NO_SUCH_OBJECT syscall.Errno = 8240 + ERROR_DS_ALIAS_PROBLEM syscall.Errno = 8241 + ERROR_DS_INVALID_DN_SYNTAX syscall.Errno = 8242 + ERROR_DS_IS_LEAF syscall.Errno = 8243 + ERROR_DS_ALIAS_DEREF_PROBLEM syscall.Errno = 8244 + ERROR_DS_UNWILLING_TO_PERFORM syscall.Errno = 8245 + ERROR_DS_LOOP_DETECT syscall.Errno = 8246 + ERROR_DS_NAMING_VIOLATION syscall.Errno = 8247 + ERROR_DS_OBJECT_RESULTS_TOO_LARGE syscall.Errno = 8248 + ERROR_DS_AFFECTS_MULTIPLE_DSAS syscall.Errno = 8249 + ERROR_DS_SERVER_DOWN syscall.Errno = 8250 + ERROR_DS_LOCAL_ERROR syscall.Errno = 8251 + ERROR_DS_ENCODING_ERROR syscall.Errno = 8252 + ERROR_DS_DECODING_ERROR syscall.Errno = 8253 + ERROR_DS_FILTER_UNKNOWN syscall.Errno = 8254 + ERROR_DS_PARAM_ERROR syscall.Errno = 8255 + ERROR_DS_NOT_SUPPORTED syscall.Errno = 8256 + ERROR_DS_NO_RESULTS_RETURNED syscall.Errno = 8257 + ERROR_DS_CONTROL_NOT_FOUND syscall.Errno = 8258 + ERROR_DS_CLIENT_LOOP syscall.Errno = 8259 + ERROR_DS_REFERRAL_LIMIT_EXCEEDED syscall.Errno = 8260 + ERROR_DS_SORT_CONTROL_MISSING syscall.Errno = 8261 + ERROR_DS_OFFSET_RANGE_ERROR syscall.Errno = 8262 + ERROR_DS_RIDMGR_DISABLED syscall.Errno = 8263 + ERROR_DS_ROOT_MUST_BE_NC syscall.Errno = 8301 + ERROR_DS_ADD_REPLICA_INHIBITED syscall.Errno = 8302 + ERROR_DS_ATT_NOT_DEF_IN_SCHEMA syscall.Errno = 8303 + ERROR_DS_MAX_OBJ_SIZE_EXCEEDED syscall.Errno = 8304 + ERROR_DS_OBJ_STRING_NAME_EXISTS syscall.Errno = 8305 + 
ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA syscall.Errno = 8306 + ERROR_DS_RDN_DOESNT_MATCH_SCHEMA syscall.Errno = 8307 + ERROR_DS_NO_REQUESTED_ATTS_FOUND syscall.Errno = 8308 + ERROR_DS_USER_BUFFER_TO_SMALL syscall.Errno = 8309 + ERROR_DS_ATT_IS_NOT_ON_OBJ syscall.Errno = 8310 + ERROR_DS_ILLEGAL_MOD_OPERATION syscall.Errno = 8311 + ERROR_DS_OBJ_TOO_LARGE syscall.Errno = 8312 + ERROR_DS_BAD_INSTANCE_TYPE syscall.Errno = 8313 + ERROR_DS_MASTERDSA_REQUIRED syscall.Errno = 8314 + ERROR_DS_OBJECT_CLASS_REQUIRED syscall.Errno = 8315 + ERROR_DS_MISSING_REQUIRED_ATT syscall.Errno = 8316 + ERROR_DS_ATT_NOT_DEF_FOR_CLASS syscall.Errno = 8317 + ERROR_DS_ATT_ALREADY_EXISTS syscall.Errno = 8318 + ERROR_DS_CANT_ADD_ATT_VALUES syscall.Errno = 8320 + ERROR_DS_SINGLE_VALUE_CONSTRAINT syscall.Errno = 8321 + ERROR_DS_RANGE_CONSTRAINT syscall.Errno = 8322 + ERROR_DS_ATT_VAL_ALREADY_EXISTS syscall.Errno = 8323 + ERROR_DS_CANT_REM_MISSING_ATT syscall.Errno = 8324 + ERROR_DS_CANT_REM_MISSING_ATT_VAL syscall.Errno = 8325 + ERROR_DS_ROOT_CANT_BE_SUBREF syscall.Errno = 8326 + ERROR_DS_NO_CHAINING syscall.Errno = 8327 + ERROR_DS_NO_CHAINED_EVAL syscall.Errno = 8328 + ERROR_DS_NO_PARENT_OBJECT syscall.Errno = 8329 + ERROR_DS_PARENT_IS_AN_ALIAS syscall.Errno = 8330 + ERROR_DS_CANT_MIX_MASTER_AND_REPS syscall.Errno = 8331 + ERROR_DS_CHILDREN_EXIST syscall.Errno = 8332 + ERROR_DS_OBJ_NOT_FOUND syscall.Errno = 8333 + ERROR_DS_ALIASED_OBJ_MISSING syscall.Errno = 8334 + ERROR_DS_BAD_NAME_SYNTAX syscall.Errno = 8335 + ERROR_DS_ALIAS_POINTS_TO_ALIAS syscall.Errno = 8336 + ERROR_DS_CANT_DEREF_ALIAS syscall.Errno = 8337 + ERROR_DS_OUT_OF_SCOPE syscall.Errno = 8338 + ERROR_DS_OBJECT_BEING_REMOVED syscall.Errno = 8339 + ERROR_DS_CANT_DELETE_DSA_OBJ syscall.Errno = 8340 + ERROR_DS_GENERIC_ERROR syscall.Errno = 8341 + ERROR_DS_DSA_MUST_BE_INT_MASTER syscall.Errno = 8342 + ERROR_DS_CLASS_NOT_DSA syscall.Errno = 8343 + ERROR_DS_INSUFF_ACCESS_RIGHTS syscall.Errno = 8344 + ERROR_DS_ILLEGAL_SUPERIOR syscall.Errno = 8345 + ERROR_DS_ATTRIBUTE_OWNED_BY_SAM syscall.Errno = 8346 + ERROR_DS_NAME_TOO_MANY_PARTS syscall.Errno = 8347 + ERROR_DS_NAME_TOO_LONG syscall.Errno = 8348 + ERROR_DS_NAME_VALUE_TOO_LONG syscall.Errno = 8349 + ERROR_DS_NAME_UNPARSEABLE syscall.Errno = 8350 + ERROR_DS_NAME_TYPE_UNKNOWN syscall.Errno = 8351 + ERROR_DS_NOT_AN_OBJECT syscall.Errno = 8352 + ERROR_DS_SEC_DESC_TOO_SHORT syscall.Errno = 8353 + ERROR_DS_SEC_DESC_INVALID syscall.Errno = 8354 + ERROR_DS_NO_DELETED_NAME syscall.Errno = 8355 + ERROR_DS_SUBREF_MUST_HAVE_PARENT syscall.Errno = 8356 + ERROR_DS_NCNAME_MUST_BE_NC syscall.Errno = 8357 + ERROR_DS_CANT_ADD_SYSTEM_ONLY syscall.Errno = 8358 + ERROR_DS_CLASS_MUST_BE_CONCRETE syscall.Errno = 8359 + ERROR_DS_INVALID_DMD syscall.Errno = 8360 + ERROR_DS_OBJ_GUID_EXISTS syscall.Errno = 8361 + ERROR_DS_NOT_ON_BACKLINK syscall.Errno = 8362 + ERROR_DS_NO_CROSSREF_FOR_NC syscall.Errno = 8363 + ERROR_DS_SHUTTING_DOWN syscall.Errno = 8364 + ERROR_DS_UNKNOWN_OPERATION syscall.Errno = 8365 + ERROR_DS_INVALID_ROLE_OWNER syscall.Errno = 8366 + ERROR_DS_COULDNT_CONTACT_FSMO syscall.Errno = 8367 + ERROR_DS_CROSS_NC_DN_RENAME syscall.Errno = 8368 + ERROR_DS_CANT_MOD_SYSTEM_ONLY syscall.Errno = 8369 + ERROR_DS_REPLICATOR_ONLY syscall.Errno = 8370 + ERROR_DS_OBJ_CLASS_NOT_DEFINED syscall.Errno = 8371 + ERROR_DS_OBJ_CLASS_NOT_SUBCLASS syscall.Errno = 8372 + ERROR_DS_NAME_REFERENCE_INVALID syscall.Errno = 8373 + ERROR_DS_CROSS_REF_EXISTS syscall.Errno = 8374 + ERROR_DS_CANT_DEL_MASTER_CROSSREF syscall.Errno = 8375 + 
ERROR_DS_SUBTREE_NOTIFY_NOT_NC_HEAD syscall.Errno = 8376 + ERROR_DS_NOTIFY_FILTER_TOO_COMPLEX syscall.Errno = 8377 + ERROR_DS_DUP_RDN syscall.Errno = 8378 + ERROR_DS_DUP_OID syscall.Errno = 8379 + ERROR_DS_DUP_MAPI_ID syscall.Errno = 8380 + ERROR_DS_DUP_SCHEMA_ID_GUID syscall.Errno = 8381 + ERROR_DS_DUP_LDAP_DISPLAY_NAME syscall.Errno = 8382 + ERROR_DS_SEMANTIC_ATT_TEST syscall.Errno = 8383 + ERROR_DS_SYNTAX_MISMATCH syscall.Errno = 8384 + ERROR_DS_EXISTS_IN_MUST_HAVE syscall.Errno = 8385 + ERROR_DS_EXISTS_IN_MAY_HAVE syscall.Errno = 8386 + ERROR_DS_NONEXISTENT_MAY_HAVE syscall.Errno = 8387 + ERROR_DS_NONEXISTENT_MUST_HAVE syscall.Errno = 8388 + ERROR_DS_AUX_CLS_TEST_FAIL syscall.Errno = 8389 + ERROR_DS_NONEXISTENT_POSS_SUP syscall.Errno = 8390 + ERROR_DS_SUB_CLS_TEST_FAIL syscall.Errno = 8391 + ERROR_DS_BAD_RDN_ATT_ID_SYNTAX syscall.Errno = 8392 + ERROR_DS_EXISTS_IN_AUX_CLS syscall.Errno = 8393 + ERROR_DS_EXISTS_IN_SUB_CLS syscall.Errno = 8394 + ERROR_DS_EXISTS_IN_POSS_SUP syscall.Errno = 8395 + ERROR_DS_RECALCSCHEMA_FAILED syscall.Errno = 8396 + ERROR_DS_TREE_DELETE_NOT_FINISHED syscall.Errno = 8397 + ERROR_DS_CANT_DELETE syscall.Errno = 8398 + ERROR_DS_ATT_SCHEMA_REQ_ID syscall.Errno = 8399 + ERROR_DS_BAD_ATT_SCHEMA_SYNTAX syscall.Errno = 8400 + ERROR_DS_CANT_CACHE_ATT syscall.Errno = 8401 + ERROR_DS_CANT_CACHE_CLASS syscall.Errno = 8402 + ERROR_DS_CANT_REMOVE_ATT_CACHE syscall.Errno = 8403 + ERROR_DS_CANT_REMOVE_CLASS_CACHE syscall.Errno = 8404 + ERROR_DS_CANT_RETRIEVE_DN syscall.Errno = 8405 + ERROR_DS_MISSING_SUPREF syscall.Errno = 8406 + ERROR_DS_CANT_RETRIEVE_INSTANCE syscall.Errno = 8407 + ERROR_DS_CODE_INCONSISTENCY syscall.Errno = 8408 + ERROR_DS_DATABASE_ERROR syscall.Errno = 8409 + ERROR_DS_GOVERNSID_MISSING syscall.Errno = 8410 + ERROR_DS_MISSING_EXPECTED_ATT syscall.Errno = 8411 + ERROR_DS_NCNAME_MISSING_CR_REF syscall.Errno = 8412 + ERROR_DS_SECURITY_CHECKING_ERROR syscall.Errno = 8413 + ERROR_DS_SCHEMA_NOT_LOADED syscall.Errno = 8414 + ERROR_DS_SCHEMA_ALLOC_FAILED syscall.Errno = 8415 + ERROR_DS_ATT_SCHEMA_REQ_SYNTAX syscall.Errno = 8416 + ERROR_DS_GCVERIFY_ERROR syscall.Errno = 8417 + ERROR_DS_DRA_SCHEMA_MISMATCH syscall.Errno = 8418 + ERROR_DS_CANT_FIND_DSA_OBJ syscall.Errno = 8419 + ERROR_DS_CANT_FIND_EXPECTED_NC syscall.Errno = 8420 + ERROR_DS_CANT_FIND_NC_IN_CACHE syscall.Errno = 8421 + ERROR_DS_CANT_RETRIEVE_CHILD syscall.Errno = 8422 + ERROR_DS_SECURITY_ILLEGAL_MODIFY syscall.Errno = 8423 + ERROR_DS_CANT_REPLACE_HIDDEN_REC syscall.Errno = 8424 + ERROR_DS_BAD_HIERARCHY_FILE syscall.Errno = 8425 + ERROR_DS_BUILD_HIERARCHY_TABLE_FAILED syscall.Errno = 8426 + ERROR_DS_CONFIG_PARAM_MISSING syscall.Errno = 8427 + ERROR_DS_COUNTING_AB_INDICES_FAILED syscall.Errno = 8428 + ERROR_DS_HIERARCHY_TABLE_MALLOC_FAILED syscall.Errno = 8429 + ERROR_DS_INTERNAL_FAILURE syscall.Errno = 8430 + ERROR_DS_UNKNOWN_ERROR syscall.Errno = 8431 + ERROR_DS_ROOT_REQUIRES_CLASS_TOP syscall.Errno = 8432 + ERROR_DS_REFUSING_FSMO_ROLES syscall.Errno = 8433 + ERROR_DS_MISSING_FSMO_SETTINGS syscall.Errno = 8434 + ERROR_DS_UNABLE_TO_SURRENDER_ROLES syscall.Errno = 8435 + ERROR_DS_DRA_GENERIC syscall.Errno = 8436 + ERROR_DS_DRA_INVALID_PARAMETER syscall.Errno = 8437 + ERROR_DS_DRA_BUSY syscall.Errno = 8438 + ERROR_DS_DRA_BAD_DN syscall.Errno = 8439 + ERROR_DS_DRA_BAD_NC syscall.Errno = 8440 + ERROR_DS_DRA_DN_EXISTS syscall.Errno = 8441 + ERROR_DS_DRA_INTERNAL_ERROR syscall.Errno = 8442 + ERROR_DS_DRA_INCONSISTENT_DIT syscall.Errno = 8443 + ERROR_DS_DRA_CONNECTION_FAILED syscall.Errno = 8444 + 
ERROR_DS_DRA_BAD_INSTANCE_TYPE syscall.Errno = 8445 + ERROR_DS_DRA_OUT_OF_MEM syscall.Errno = 8446 + ERROR_DS_DRA_MAIL_PROBLEM syscall.Errno = 8447 + ERROR_DS_DRA_REF_ALREADY_EXISTS syscall.Errno = 8448 + ERROR_DS_DRA_REF_NOT_FOUND syscall.Errno = 8449 + ERROR_DS_DRA_OBJ_IS_REP_SOURCE syscall.Errno = 8450 + ERROR_DS_DRA_DB_ERROR syscall.Errno = 8451 + ERROR_DS_DRA_NO_REPLICA syscall.Errno = 8452 + ERROR_DS_DRA_ACCESS_DENIED syscall.Errno = 8453 + ERROR_DS_DRA_NOT_SUPPORTED syscall.Errno = 8454 + ERROR_DS_DRA_RPC_CANCELLED syscall.Errno = 8455 + ERROR_DS_DRA_SOURCE_DISABLED syscall.Errno = 8456 + ERROR_DS_DRA_SINK_DISABLED syscall.Errno = 8457 + ERROR_DS_DRA_NAME_COLLISION syscall.Errno = 8458 + ERROR_DS_DRA_SOURCE_REINSTALLED syscall.Errno = 8459 + ERROR_DS_DRA_MISSING_PARENT syscall.Errno = 8460 + ERROR_DS_DRA_PREEMPTED syscall.Errno = 8461 + ERROR_DS_DRA_ABANDON_SYNC syscall.Errno = 8462 + ERROR_DS_DRA_SHUTDOWN syscall.Errno = 8463 + ERROR_DS_DRA_INCOMPATIBLE_PARTIAL_SET syscall.Errno = 8464 + ERROR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA syscall.Errno = 8465 + ERROR_DS_DRA_EXTN_CONNECTION_FAILED syscall.Errno = 8466 + ERROR_DS_INSTALL_SCHEMA_MISMATCH syscall.Errno = 8467 + ERROR_DS_DUP_LINK_ID syscall.Errno = 8468 + ERROR_DS_NAME_ERROR_RESOLVING syscall.Errno = 8469 + ERROR_DS_NAME_ERROR_NOT_FOUND syscall.Errno = 8470 + ERROR_DS_NAME_ERROR_NOT_UNIQUE syscall.Errno = 8471 + ERROR_DS_NAME_ERROR_NO_MAPPING syscall.Errno = 8472 + ERROR_DS_NAME_ERROR_DOMAIN_ONLY syscall.Errno = 8473 + ERROR_DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING syscall.Errno = 8474 + ERROR_DS_CONSTRUCTED_ATT_MOD syscall.Errno = 8475 + ERROR_DS_WRONG_OM_OBJ_CLASS syscall.Errno = 8476 + ERROR_DS_DRA_REPL_PENDING syscall.Errno = 8477 + ERROR_DS_DS_REQUIRED syscall.Errno = 8478 + ERROR_DS_INVALID_LDAP_DISPLAY_NAME syscall.Errno = 8479 + ERROR_DS_NON_BASE_SEARCH syscall.Errno = 8480 + ERROR_DS_CANT_RETRIEVE_ATTS syscall.Errno = 8481 + ERROR_DS_BACKLINK_WITHOUT_LINK syscall.Errno = 8482 + ERROR_DS_EPOCH_MISMATCH syscall.Errno = 8483 + ERROR_DS_SRC_NAME_MISMATCH syscall.Errno = 8484 + ERROR_DS_SRC_AND_DST_NC_IDENTICAL syscall.Errno = 8485 + ERROR_DS_DST_NC_MISMATCH syscall.Errno = 8486 + ERROR_DS_NOT_AUTHORITIVE_FOR_DST_NC syscall.Errno = 8487 + ERROR_DS_SRC_GUID_MISMATCH syscall.Errno = 8488 + ERROR_DS_CANT_MOVE_DELETED_OBJECT syscall.Errno = 8489 + ERROR_DS_PDC_OPERATION_IN_PROGRESS syscall.Errno = 8490 + ERROR_DS_CROSS_DOMAIN_CLEANUP_REQD syscall.Errno = 8491 + ERROR_DS_ILLEGAL_XDOM_MOVE_OPERATION syscall.Errno = 8492 + ERROR_DS_CANT_WITH_ACCT_GROUP_MEMBERSHPS syscall.Errno = 8493 + ERROR_DS_NC_MUST_HAVE_NC_PARENT syscall.Errno = 8494 + ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE syscall.Errno = 8495 + ERROR_DS_DST_DOMAIN_NOT_NATIVE syscall.Errno = 8496 + ERROR_DS_MISSING_INFRASTRUCTURE_CONTAINER syscall.Errno = 8497 + ERROR_DS_CANT_MOVE_ACCOUNT_GROUP syscall.Errno = 8498 + ERROR_DS_CANT_MOVE_RESOURCE_GROUP syscall.Errno = 8499 + ERROR_DS_INVALID_SEARCH_FLAG syscall.Errno = 8500 + ERROR_DS_NO_TREE_DELETE_ABOVE_NC syscall.Errno = 8501 + ERROR_DS_COULDNT_LOCK_TREE_FOR_DELETE syscall.Errno = 8502 + ERROR_DS_COULDNT_IDENTIFY_OBJECTS_FOR_TREE_DELETE syscall.Errno = 8503 + ERROR_DS_SAM_INIT_FAILURE syscall.Errno = 8504 + ERROR_DS_SENSITIVE_GROUP_VIOLATION syscall.Errno = 8505 + ERROR_DS_CANT_MOD_PRIMARYGROUPID syscall.Errno = 8506 + ERROR_DS_ILLEGAL_BASE_SCHEMA_MOD syscall.Errno = 8507 + ERROR_DS_NONSAFE_SCHEMA_CHANGE syscall.Errno = 8508 + ERROR_DS_SCHEMA_UPDATE_DISALLOWED syscall.Errno = 8509 + ERROR_DS_CANT_CREATE_UNDER_SCHEMA syscall.Errno = 8510 
+ ERROR_DS_INSTALL_NO_SRC_SCH_VERSION syscall.Errno = 8511 + ERROR_DS_INSTALL_NO_SCH_VERSION_IN_INIFILE syscall.Errno = 8512 + ERROR_DS_INVALID_GROUP_TYPE syscall.Errno = 8513 + ERROR_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8514 + ERROR_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8515 + ERROR_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8516 + ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8517 + ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8518 + ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER syscall.Errno = 8519 + ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER syscall.Errno = 8520 + ERROR_DS_HAVE_PRIMARY_MEMBERS syscall.Errno = 8521 + ERROR_DS_STRING_SD_CONVERSION_FAILED syscall.Errno = 8522 + ERROR_DS_NAMING_MASTER_GC syscall.Errno = 8523 + ERROR_DS_DNS_LOOKUP_FAILURE syscall.Errno = 8524 + ERROR_DS_COULDNT_UPDATE_SPNS syscall.Errno = 8525 + ERROR_DS_CANT_RETRIEVE_SD syscall.Errno = 8526 + ERROR_DS_KEY_NOT_UNIQUE syscall.Errno = 8527 + ERROR_DS_WRONG_LINKED_ATT_SYNTAX syscall.Errno = 8528 + ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD syscall.Errno = 8529 + ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY syscall.Errno = 8530 + ERROR_DS_CANT_START syscall.Errno = 8531 + ERROR_DS_INIT_FAILURE syscall.Errno = 8532 + ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION syscall.Errno = 8533 + ERROR_DS_SOURCE_DOMAIN_IN_FOREST syscall.Errno = 8534 + ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST syscall.Errno = 8535 + ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED syscall.Errno = 8536 + ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN syscall.Errno = 8537 + ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER syscall.Errno = 8538 + ERROR_DS_SRC_SID_EXISTS_IN_FOREST syscall.Errno = 8539 + ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH syscall.Errno = 8540 + ERROR_SAM_INIT_FAILURE syscall.Errno = 8541 + ERROR_DS_DRA_SCHEMA_INFO_SHIP syscall.Errno = 8542 + ERROR_DS_DRA_SCHEMA_CONFLICT syscall.Errno = 8543 + ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT syscall.Errno = 8544 + ERROR_DS_DRA_OBJ_NC_MISMATCH syscall.Errno = 8545 + ERROR_DS_NC_STILL_HAS_DSAS syscall.Errno = 8546 + ERROR_DS_GC_REQUIRED syscall.Errno = 8547 + ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY syscall.Errno = 8548 + ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS syscall.Errno = 8549 + ERROR_DS_CANT_ADD_TO_GC syscall.Errno = 8550 + ERROR_DS_NO_CHECKPOINT_WITH_PDC syscall.Errno = 8551 + ERROR_DS_SOURCE_AUDITING_NOT_ENABLED syscall.Errno = 8552 + ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC syscall.Errno = 8553 + ERROR_DS_INVALID_NAME_FOR_SPN syscall.Errno = 8554 + ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS syscall.Errno = 8555 + ERROR_DS_UNICODEPWD_NOT_IN_QUOTES syscall.Errno = 8556 + ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED syscall.Errno = 8557 + ERROR_DS_MUST_BE_RUN_ON_DST_DC syscall.Errno = 8558 + ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER syscall.Errno = 8559 + ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ syscall.Errno = 8560 + ERROR_DS_INIT_FAILURE_CONSOLE syscall.Errno = 8561 + ERROR_DS_SAM_INIT_FAILURE_CONSOLE syscall.Errno = 8562 + ERROR_DS_FOREST_VERSION_TOO_HIGH syscall.Errno = 8563 + ERROR_DS_DOMAIN_VERSION_TOO_HIGH syscall.Errno = 8564 + ERROR_DS_FOREST_VERSION_TOO_LOW syscall.Errno = 8565 + ERROR_DS_DOMAIN_VERSION_TOO_LOW syscall.Errno = 8566 + ERROR_DS_INCOMPATIBLE_VERSION syscall.Errno = 8567 + ERROR_DS_LOW_DSA_VERSION syscall.Errno = 8568 + ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN syscall.Errno = 8569 + ERROR_DS_NOT_SUPPORTED_SORT_ORDER syscall.Errno = 8570 + ERROR_DS_NAME_NOT_UNIQUE syscall.Errno = 8571 + ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4 syscall.Errno = 8572 + 
ERROR_DS_OUT_OF_VERSION_STORE syscall.Errno = 8573 + ERROR_DS_INCOMPATIBLE_CONTROLS_USED syscall.Errno = 8574 + ERROR_DS_NO_REF_DOMAIN syscall.Errno = 8575 + ERROR_DS_RESERVED_LINK_ID syscall.Errno = 8576 + ERROR_DS_LINK_ID_NOT_AVAILABLE syscall.Errno = 8577 + ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8578 + ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE syscall.Errno = 8579 + ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC syscall.Errno = 8580 + ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG syscall.Errno = 8581 + ERROR_DS_MODIFYDN_WRONG_GRANDPARENT syscall.Errno = 8582 + ERROR_DS_NAME_ERROR_TRUST_REFERRAL syscall.Errno = 8583 + ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER syscall.Errno = 8584 + ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD syscall.Errno = 8585 + ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2 syscall.Errno = 8586 + ERROR_DS_THREAD_LIMIT_EXCEEDED syscall.Errno = 8587 + ERROR_DS_NOT_CLOSEST syscall.Errno = 8588 + ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF syscall.Errno = 8589 + ERROR_DS_SINGLE_USER_MODE_FAILED syscall.Errno = 8590 + ERROR_DS_NTDSCRIPT_SYNTAX_ERROR syscall.Errno = 8591 + ERROR_DS_NTDSCRIPT_PROCESS_ERROR syscall.Errno = 8592 + ERROR_DS_DIFFERENT_REPL_EPOCHS syscall.Errno = 8593 + ERROR_DS_DRS_EXTENSIONS_CHANGED syscall.Errno = 8594 + ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR syscall.Errno = 8595 + ERROR_DS_NO_MSDS_INTID syscall.Errno = 8596 + ERROR_DS_DUP_MSDS_INTID syscall.Errno = 8597 + ERROR_DS_EXISTS_IN_RDNATTID syscall.Errno = 8598 + ERROR_DS_AUTHORIZATION_FAILED syscall.Errno = 8599 + ERROR_DS_INVALID_SCRIPT syscall.Errno = 8600 + ERROR_DS_REMOTE_CROSSREF_OP_FAILED syscall.Errno = 8601 + ERROR_DS_CROSS_REF_BUSY syscall.Errno = 8602 + ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN syscall.Errno = 8603 + ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC syscall.Errno = 8604 + ERROR_DS_DUPLICATE_ID_FOUND syscall.Errno = 8605 + ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT syscall.Errno = 8606 + ERROR_DS_GROUP_CONVERSION_ERROR syscall.Errno = 8607 + ERROR_DS_CANT_MOVE_APP_BASIC_GROUP syscall.Errno = 8608 + ERROR_DS_CANT_MOVE_APP_QUERY_GROUP syscall.Errno = 8609 + ERROR_DS_ROLE_NOT_VERIFIED syscall.Errno = 8610 + ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL syscall.Errno = 8611 + ERROR_DS_DOMAIN_RENAME_IN_PROGRESS syscall.Errno = 8612 + ERROR_DS_EXISTING_AD_CHILD_NC syscall.Errno = 8613 + ERROR_DS_REPL_LIFETIME_EXCEEDED syscall.Errno = 8614 + ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER syscall.Errno = 8615 + ERROR_DS_LDAP_SEND_QUEUE_FULL syscall.Errno = 8616 + ERROR_DS_DRA_OUT_SCHEDULE_WINDOW syscall.Errno = 8617 + ERROR_DS_POLICY_NOT_KNOWN syscall.Errno = 8618 + ERROR_NO_SITE_SETTINGS_OBJECT syscall.Errno = 8619 + ERROR_NO_SECRETS syscall.Errno = 8620 + ERROR_NO_WRITABLE_DC_FOUND syscall.Errno = 8621 + ERROR_DS_NO_SERVER_OBJECT syscall.Errno = 8622 + ERROR_DS_NO_NTDSA_OBJECT syscall.Errno = 8623 + ERROR_DS_NON_ASQ_SEARCH syscall.Errno = 8624 + ERROR_DS_AUDIT_FAILURE syscall.Errno = 8625 + ERROR_DS_INVALID_SEARCH_FLAG_SUBTREE syscall.Errno = 8626 + ERROR_DS_INVALID_SEARCH_FLAG_TUPLE syscall.Errno = 8627 + ERROR_DS_HIERARCHY_TABLE_TOO_DEEP syscall.Errno = 8628 + ERROR_DS_DRA_CORRUPT_UTD_VECTOR syscall.Errno = 8629 + ERROR_DS_DRA_SECRETS_DENIED syscall.Errno = 8630 + ERROR_DS_RESERVED_MAPI_ID syscall.Errno = 8631 + ERROR_DS_MAPI_ID_NOT_AVAILABLE syscall.Errno = 8632 + ERROR_DS_DRA_MISSING_KRBTGT_SECRET syscall.Errno = 8633 + ERROR_DS_DOMAIN_NAME_EXISTS_IN_FOREST syscall.Errno = 8634 + ERROR_DS_FLAT_NAME_EXISTS_IN_FOREST syscall.Errno = 8635 + ERROR_INVALID_USER_PRINCIPAL_NAME syscall.Errno = 8636 + 
ERROR_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS syscall.Errno = 8637 + ERROR_DS_OID_NOT_FOUND syscall.Errno = 8638 + ERROR_DS_DRA_RECYCLED_TARGET syscall.Errno = 8639 + ERROR_DS_DISALLOWED_NC_REDIRECT syscall.Errno = 8640 + ERROR_DS_HIGH_ADLDS_FFL syscall.Errno = 8641 + ERROR_DS_HIGH_DSA_VERSION syscall.Errno = 8642 + ERROR_DS_LOW_ADLDS_FFL syscall.Errno = 8643 + ERROR_DOMAIN_SID_SAME_AS_LOCAL_WORKSTATION syscall.Errno = 8644 + ERROR_DS_UNDELETE_SAM_VALIDATION_FAILED syscall.Errno = 8645 + ERROR_INCORRECT_ACCOUNT_TYPE syscall.Errno = 8646 + ERROR_DS_SPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8647 + ERROR_DS_UPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8648 + ERROR_DS_MISSING_FOREST_TRUST syscall.Errno = 8649 + ERROR_DS_VALUE_KEY_NOT_UNIQUE syscall.Errno = 8650 + DNS_ERROR_RESPONSE_CODES_BASE syscall.Errno = 9000 + DNS_ERROR_RCODE_NO_ERROR = ERROR_SUCCESS + DNS_ERROR_MASK syscall.Errno = 0x00002328 + DNS_ERROR_RCODE_FORMAT_ERROR syscall.Errno = 9001 + DNS_ERROR_RCODE_SERVER_FAILURE syscall.Errno = 9002 + DNS_ERROR_RCODE_NAME_ERROR syscall.Errno = 9003 + DNS_ERROR_RCODE_NOT_IMPLEMENTED syscall.Errno = 9004 + DNS_ERROR_RCODE_REFUSED syscall.Errno = 9005 + DNS_ERROR_RCODE_YXDOMAIN syscall.Errno = 9006 + DNS_ERROR_RCODE_YXRRSET syscall.Errno = 9007 + DNS_ERROR_RCODE_NXRRSET syscall.Errno = 9008 + DNS_ERROR_RCODE_NOTAUTH syscall.Errno = 9009 + DNS_ERROR_RCODE_NOTZONE syscall.Errno = 9010 + DNS_ERROR_RCODE_BADSIG syscall.Errno = 9016 + DNS_ERROR_RCODE_BADKEY syscall.Errno = 9017 + DNS_ERROR_RCODE_BADTIME syscall.Errno = 9018 + DNS_ERROR_RCODE_LAST = DNS_ERROR_RCODE_BADTIME + DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100 + DNS_ERROR_KEYMASTER_REQUIRED syscall.Errno = 9101 + DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE syscall.Errno = 9102 + DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1 syscall.Errno = 9103 + DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS syscall.Errno = 9104 + DNS_ERROR_UNSUPPORTED_ALGORITHM syscall.Errno = 9105 + DNS_ERROR_INVALID_KEY_SIZE syscall.Errno = 9106 + DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE syscall.Errno = 9107 + DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION syscall.Errno = 9108 + DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR syscall.Errno = 9109 + DNS_ERROR_UNEXPECTED_CNG_ERROR syscall.Errno = 9110 + DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION syscall.Errno = 9111 + DNS_ERROR_KSP_NOT_ACCESSIBLE syscall.Errno = 9112 + DNS_ERROR_TOO_MANY_SKDS syscall.Errno = 9113 + DNS_ERROR_INVALID_ROLLOVER_PERIOD syscall.Errno = 9114 + DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET syscall.Errno = 9115 + DNS_ERROR_ROLLOVER_IN_PROGRESS syscall.Errno = 9116 + DNS_ERROR_STANDBY_KEY_NOT_PRESENT syscall.Errno = 9117 + DNS_ERROR_NOT_ALLOWED_ON_ZSK syscall.Errno = 9118 + DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD syscall.Errno = 9119 + DNS_ERROR_ROLLOVER_ALREADY_QUEUED syscall.Errno = 9120 + DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE syscall.Errno = 9121 + DNS_ERROR_BAD_KEYMASTER syscall.Errno = 9122 + DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD syscall.Errno = 9123 + DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT syscall.Errno = 9124 + DNS_ERROR_DNSSEC_IS_DISABLED syscall.Errno = 9125 + DNS_ERROR_INVALID_XML syscall.Errno = 9126 + DNS_ERROR_NO_VALID_TRUST_ANCHORS syscall.Errno = 9127 + DNS_ERROR_ROLLOVER_NOT_POKEABLE syscall.Errno = 9128 + DNS_ERROR_NSEC3_NAME_COLLISION syscall.Errno = 9129 + DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1 syscall.Errno = 9130 + DNS_ERROR_PACKET_FMT_BASE syscall.Errno = 9500 + DNS_INFO_NO_RECORDS syscall.Errno = 9501 + DNS_ERROR_BAD_PACKET syscall.Errno = 9502 + DNS_ERROR_NO_PACKET syscall.Errno = 9503 
+ DNS_ERROR_RCODE syscall.Errno = 9504 + DNS_ERROR_UNSECURE_PACKET syscall.Errno = 9505 + DNS_STATUS_PACKET_UNSECURE = DNS_ERROR_UNSECURE_PACKET + DNS_REQUEST_PENDING syscall.Errno = 9506 + DNS_ERROR_NO_MEMORY = ERROR_OUTOFMEMORY + DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME + DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA + DNS_ERROR_GENERAL_API_BASE syscall.Errno = 9550 + DNS_ERROR_INVALID_TYPE syscall.Errno = 9551 + DNS_ERROR_INVALID_IP_ADDRESS syscall.Errno = 9552 + DNS_ERROR_INVALID_PROPERTY syscall.Errno = 9553 + DNS_ERROR_TRY_AGAIN_LATER syscall.Errno = 9554 + DNS_ERROR_NOT_UNIQUE syscall.Errno = 9555 + DNS_ERROR_NON_RFC_NAME syscall.Errno = 9556 + DNS_STATUS_FQDN syscall.Errno = 9557 + DNS_STATUS_DOTTED_NAME syscall.Errno = 9558 + DNS_STATUS_SINGLE_PART_NAME syscall.Errno = 9559 + DNS_ERROR_INVALID_NAME_CHAR syscall.Errno = 9560 + DNS_ERROR_NUMERIC_NAME syscall.Errno = 9561 + DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER syscall.Errno = 9562 + DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION syscall.Errno = 9563 + DNS_ERROR_CANNOT_FIND_ROOT_HINTS syscall.Errno = 9564 + DNS_ERROR_INCONSISTENT_ROOT_HINTS syscall.Errno = 9565 + DNS_ERROR_DWORD_VALUE_TOO_SMALL syscall.Errno = 9566 + DNS_ERROR_DWORD_VALUE_TOO_LARGE syscall.Errno = 9567 + DNS_ERROR_BACKGROUND_LOADING syscall.Errno = 9568 + DNS_ERROR_NOT_ALLOWED_ON_RODC syscall.Errno = 9569 + DNS_ERROR_NOT_ALLOWED_UNDER_DNAME syscall.Errno = 9570 + DNS_ERROR_DELEGATION_REQUIRED syscall.Errno = 9571 + DNS_ERROR_INVALID_POLICY_TABLE syscall.Errno = 9572 + DNS_ERROR_ADDRESS_REQUIRED syscall.Errno = 9573 + DNS_ERROR_ZONE_BASE syscall.Errno = 9600 + DNS_ERROR_ZONE_DOES_NOT_EXIST syscall.Errno = 9601 + DNS_ERROR_NO_ZONE_INFO syscall.Errno = 9602 + DNS_ERROR_INVALID_ZONE_OPERATION syscall.Errno = 9603 + DNS_ERROR_ZONE_CONFIGURATION_ERROR syscall.Errno = 9604 + DNS_ERROR_ZONE_HAS_NO_SOA_RECORD syscall.Errno = 9605 + DNS_ERROR_ZONE_HAS_NO_NS_RECORDS syscall.Errno = 9606 + DNS_ERROR_ZONE_LOCKED syscall.Errno = 9607 + DNS_ERROR_ZONE_CREATION_FAILED syscall.Errno = 9608 + DNS_ERROR_ZONE_ALREADY_EXISTS syscall.Errno = 9609 + DNS_ERROR_AUTOZONE_ALREADY_EXISTS syscall.Errno = 9610 + DNS_ERROR_INVALID_ZONE_TYPE syscall.Errno = 9611 + DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP syscall.Errno = 9612 + DNS_ERROR_ZONE_NOT_SECONDARY syscall.Errno = 9613 + DNS_ERROR_NEED_SECONDARY_ADDRESSES syscall.Errno = 9614 + DNS_ERROR_WINS_INIT_FAILED syscall.Errno = 9615 + DNS_ERROR_NEED_WINS_SERVERS syscall.Errno = 9616 + DNS_ERROR_NBSTAT_INIT_FAILED syscall.Errno = 9617 + DNS_ERROR_SOA_DELETE_INVALID syscall.Errno = 9618 + DNS_ERROR_FORWARDER_ALREADY_EXISTS syscall.Errno = 9619 + DNS_ERROR_ZONE_REQUIRES_MASTER_IP syscall.Errno = 9620 + DNS_ERROR_ZONE_IS_SHUTDOWN syscall.Errno = 9621 + DNS_ERROR_ZONE_LOCKED_FOR_SIGNING syscall.Errno = 9622 + DNS_ERROR_DATAFILE_BASE syscall.Errno = 9650 + DNS_ERROR_PRIMARY_REQUIRES_DATAFILE syscall.Errno = 9651 + DNS_ERROR_INVALID_DATAFILE_NAME syscall.Errno = 9652 + DNS_ERROR_DATAFILE_OPEN_FAILURE syscall.Errno = 9653 + DNS_ERROR_FILE_WRITEBACK_FAILED syscall.Errno = 9654 + DNS_ERROR_DATAFILE_PARSING syscall.Errno = 9655 + DNS_ERROR_DATABASE_BASE syscall.Errno = 9700 + DNS_ERROR_RECORD_DOES_NOT_EXIST syscall.Errno = 9701 + DNS_ERROR_RECORD_FORMAT syscall.Errno = 9702 + DNS_ERROR_NODE_CREATION_FAILED syscall.Errno = 9703 + DNS_ERROR_UNKNOWN_RECORD_TYPE syscall.Errno = 9704 + DNS_ERROR_RECORD_TIMED_OUT syscall.Errno = 9705 + DNS_ERROR_NAME_NOT_IN_ZONE syscall.Errno = 9706 + DNS_ERROR_CNAME_LOOP syscall.Errno = 9707 + DNS_ERROR_NODE_IS_CNAME syscall.Errno = 9708 + 
DNS_ERROR_CNAME_COLLISION syscall.Errno = 9709 + DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT syscall.Errno = 9710 + DNS_ERROR_RECORD_ALREADY_EXISTS syscall.Errno = 9711 + DNS_ERROR_SECONDARY_DATA syscall.Errno = 9712 + DNS_ERROR_NO_CREATE_CACHE_DATA syscall.Errno = 9713 + DNS_ERROR_NAME_DOES_NOT_EXIST syscall.Errno = 9714 + DNS_WARNING_PTR_CREATE_FAILED syscall.Errno = 9715 + DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716 + DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717 + DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718 + DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719 + DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720 + DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721 + DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722 + DNS_ERROR_OPERATION_BASE syscall.Errno = 9750 + DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751 + DNS_ERROR_AXFR syscall.Errno = 9752 + DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753 + DNS_ERROR_SECURE_BASE syscall.Errno = 9800 + DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801 + DNS_ERROR_SETUP_BASE syscall.Errno = 9850 + DNS_ERROR_NO_TCPIP syscall.Errno = 9851 + DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852 + DNS_ERROR_DP_BASE syscall.Errno = 9900 + DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901 + DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902 + DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903 + DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904 + DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905 + DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906 + DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911 + DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912 + DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913 + DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914 + DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915 + DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916 + DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917 + DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921 + DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922 + DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923 + DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924 + DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925 + DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951 + DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952 + DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953 + DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954 + DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955 + DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956 + DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957 + DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958 + DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959 + DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960 + DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961 + DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962 + DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963 + DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971 + DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972 + DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973 + DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974 + DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975 + DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976 + DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977 + DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978 + DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979 + DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980 + DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981 + 
DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982 + DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983 + DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984 + DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985 + DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986 + DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987 + DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988 + DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989 + DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990 + DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991 + DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992 + DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993 + DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994 + DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995 + DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996 + WSABASEERR syscall.Errno = 10000 + WSAEINTR syscall.Errno = 10004 + WSAEBADF syscall.Errno = 10009 + WSAEACCES syscall.Errno = 10013 + WSAEFAULT syscall.Errno = 10014 + WSAEINVAL syscall.Errno = 10022 + WSAEMFILE syscall.Errno = 10024 + WSAEWOULDBLOCK syscall.Errno = 10035 + WSAEINPROGRESS syscall.Errno = 10036 + WSAEALREADY syscall.Errno = 10037 + WSAENOTSOCK syscall.Errno = 10038 + WSAEDESTADDRREQ syscall.Errno = 10039 + WSAEMSGSIZE syscall.Errno = 10040 + WSAEPROTOTYPE syscall.Errno = 10041 + WSAENOPROTOOPT syscall.Errno = 10042 + WSAEPROTONOSUPPORT syscall.Errno = 10043 + WSAESOCKTNOSUPPORT syscall.Errno = 10044 + WSAEOPNOTSUPP syscall.Errno = 10045 + WSAEPFNOSUPPORT syscall.Errno = 10046 + WSAEAFNOSUPPORT syscall.Errno = 10047 + WSAEADDRINUSE syscall.Errno = 10048 + WSAEADDRNOTAVAIL syscall.Errno = 10049 + WSAENETDOWN syscall.Errno = 10050 + WSAENETUNREACH syscall.Errno = 10051 + WSAENETRESET syscall.Errno = 10052 + WSAECONNABORTED syscall.Errno = 10053 + WSAECONNRESET syscall.Errno = 10054 + WSAENOBUFS syscall.Errno = 10055 + WSAEISCONN syscall.Errno = 10056 + WSAENOTCONN syscall.Errno = 10057 + WSAESHUTDOWN syscall.Errno = 10058 + WSAETOOMANYREFS syscall.Errno = 10059 + WSAETIMEDOUT syscall.Errno = 10060 + WSAECONNREFUSED syscall.Errno = 10061 + WSAELOOP syscall.Errno = 10062 + WSAENAMETOOLONG syscall.Errno = 10063 + WSAEHOSTDOWN syscall.Errno = 10064 + WSAEHOSTUNREACH syscall.Errno = 10065 + WSAENOTEMPTY syscall.Errno = 10066 + WSAEPROCLIM syscall.Errno = 10067 + WSAEUSERS syscall.Errno = 10068 + WSAEDQUOT syscall.Errno = 10069 + WSAESTALE syscall.Errno = 10070 + WSAEREMOTE syscall.Errno = 10071 + WSASYSNOTREADY syscall.Errno = 10091 + WSAVERNOTSUPPORTED syscall.Errno = 10092 + WSANOTINITIALISED syscall.Errno = 10093 + WSAEDISCON syscall.Errno = 10101 + WSAENOMORE syscall.Errno = 10102 + WSAECANCELLED syscall.Errno = 10103 + WSAEINVALIDPROCTABLE syscall.Errno = 10104 + WSAEINVALIDPROVIDER syscall.Errno = 10105 + WSAEPROVIDERFAILEDINIT syscall.Errno = 10106 + WSASYSCALLFAILURE syscall.Errno = 10107 + WSASERVICE_NOT_FOUND syscall.Errno = 10108 + WSATYPE_NOT_FOUND syscall.Errno = 10109 + WSA_E_NO_MORE syscall.Errno = 10110 + WSA_E_CANCELLED syscall.Errno = 10111 + WSAEREFUSED syscall.Errno = 10112 + WSAHOST_NOT_FOUND syscall.Errno = 11001 + WSATRY_AGAIN syscall.Errno = 11002 + WSANO_RECOVERY syscall.Errno = 11003 + WSANO_DATA syscall.Errno = 11004 + WSA_QOS_RECEIVERS syscall.Errno = 11005 + WSA_QOS_SENDERS syscall.Errno = 11006 + WSA_QOS_NO_SENDERS syscall.Errno = 11007 + WSA_QOS_NO_RECEIVERS syscall.Errno = 11008 + WSA_QOS_REQUEST_CONFIRMED 
syscall.Errno = 11009 + WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010 + WSA_QOS_POLICY_FAILURE syscall.Errno = 11011 + WSA_QOS_BAD_STYLE syscall.Errno = 11012 + WSA_QOS_BAD_OBJECT syscall.Errno = 11013 + WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014 + WSA_QOS_GENERIC_ERROR syscall.Errno = 11015 + WSA_QOS_ESERVICETYPE syscall.Errno = 11016 + WSA_QOS_EFLOWSPEC syscall.Errno = 11017 + WSA_QOS_EPROVSPECBUF syscall.Errno = 11018 + WSA_QOS_EFILTERSTYLE syscall.Errno = 11019 + WSA_QOS_EFILTERTYPE syscall.Errno = 11020 + WSA_QOS_EFILTERCOUNT syscall.Errno = 11021 + WSA_QOS_EOBJLENGTH syscall.Errno = 11022 + WSA_QOS_EFLOWCOUNT syscall.Errno = 11023 + WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024 + WSA_QOS_EPOLICYOBJ syscall.Errno = 11025 + WSA_QOS_EFLOWDESC syscall.Errno = 11026 + WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027 + WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028 + WSA_QOS_ESDMODEOBJ syscall.Errno = 11029 + WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030 + WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031 + WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032 + WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033 + ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000 + ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001 + ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002 + ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003 + ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004 + ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005 + ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006 + ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007 + ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008 + ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009 + ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010 + ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011 + ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012 + ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013 + ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014 + ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015 + ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016 + ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017 + ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018 + ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019 + ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020 + ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021 + ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022 + ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023 + WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024 + WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025 + ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800 + ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801 + ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802 + ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803 + ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804 + ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805 + ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806 + ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807 + ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808 + ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809 + ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810 + ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811 + ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812 + ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813 + ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814 + ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815 + ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816 + 
ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817 + ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818 + ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819 + ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820 + ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821 + ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822 + ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823 + ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824 + ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825 + ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826 + ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827 + ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828 + ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829 + ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830 + ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831 + ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832 + ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833 + ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836 + ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 13837 + ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838 + ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839 + ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840 + ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841 + ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842 + ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843 + ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844 + ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845 + ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846 + ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847 + ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848 + ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849 + ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850 + ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851 + ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852 + ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853 + ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854 + ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855 + ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856 + ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857 + ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858 + ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859 + ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860 + ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861 + ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862 + ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863 + ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864 + ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865 + ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866 + ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867 + ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868 + ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869 + ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870 + ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871 + ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872 + ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873 + ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874 + ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875 + ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876 + ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877 + ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878 + ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879 + ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880 + 
ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881 + ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882 + ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883 + ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884 + ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885 + ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886 + ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887 + ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888 + ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889 + ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890 + ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891 + ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892 + ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893 + ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894 + ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895 + ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896 + ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897 + ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898 + ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899 + ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900 + ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901 + ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902 + ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903 + ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908 + ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909 + ERROR_IPSEC_BAD_SPI syscall.Errno = 13910 + ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911 + ERROR_IPSEC_WRONG_SA syscall.Errno = 13912 + ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913 + ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914 + ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915 + ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916 + ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917 + ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918 + ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925 + ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926 + ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927 + ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928 + ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929 + ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930 + ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931 + ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932 + ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000 + ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001 + ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002 + ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003 + ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004 + ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005 + ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006 + ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007 + ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008 + ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009 + ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010 + ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011 + ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012 + ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013 + ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014 + 
ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015 + ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017 + ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018 + ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019 + ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020 + ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021 + ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022 + ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023 + ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024 + ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025 + ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026 + ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027 + ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028 + ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029 + ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030 + ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031 + ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032 + ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033 + ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034 + ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035 + ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036 + ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037 + ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038 + ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039 + ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040 + ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041 + ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042 + ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043 + ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044 + ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045 + ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046 + ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047 + ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048 + ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049 + ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050 + ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051 + ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052 + ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053 + ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054 + ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055 + ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056 + ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057 + ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058 + ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059 + ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060 + ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061 + ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062 + ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063 + ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064 + ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065 + ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066 + ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067 + ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068 + ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069 + ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070 + ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071 + ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072 + ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073 + ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074 + ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075 + 
ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076 + ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077 + ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078 + ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080 + ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081 + ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082 + ERROR_SXS_CORRUPTION syscall.Errno = 14083 + ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084 + ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085 + ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086 + ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087 + ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088 + ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091 + ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092 + ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093 + ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094 + ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN syscall.Errno = 14095 + ERROR_UNMAPPED_SUBSTITUTION_STRING syscall.Errno = 14096 + ERROR_SXS_ASSEMBLY_NOT_LOCKED syscall.Errno = 14097 + ERROR_SXS_COMPONENT_STORE_CORRUPT syscall.Errno = 14098 + ERROR_ADVANCED_INSTALLER_FAILED syscall.Errno = 14099 + ERROR_XML_ENCODING_MISMATCH syscall.Errno = 14100 + ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT syscall.Errno = 14101 + ERROR_SXS_IDENTITIES_DIFFERENT syscall.Errno = 14102 + ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT syscall.Errno = 14103 + ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY syscall.Errno = 14104 + ERROR_SXS_MANIFEST_TOO_BIG syscall.Errno = 14105 + ERROR_SXS_SETTING_NOT_REGISTERED syscall.Errno = 14106 + ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE syscall.Errno = 14107 + ERROR_SMI_PRIMITIVE_INSTALLER_FAILED syscall.Errno = 14108 + ERROR_GENERIC_COMMAND_FAILED syscall.Errno = 14109 + ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110 + ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000 + ERROR_EVT_INVALID_QUERY syscall.Errno = 15001 + ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND syscall.Errno = 15002 + ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND syscall.Errno = 15003 + ERROR_EVT_INVALID_PUBLISHER_NAME syscall.Errno = 15004 + ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 + ERROR_EVT_CHANNEL_NOT_FOUND syscall.Errno = 15007 + ERROR_EVT_MALFORMED_XML_TEXT syscall.Errno = 15008 + ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL syscall.Errno = 15009 + ERROR_EVT_CONFIGURATION_ERROR syscall.Errno = 15010 + ERROR_EVT_QUERY_RESULT_STALE syscall.Errno = 15011 + ERROR_EVT_QUERY_RESULT_INVALID_POSITION syscall.Errno = 15012 + ERROR_EVT_NON_VALIDATING_MSXML syscall.Errno = 15013 + ERROR_EVT_FILTER_ALREADYSCOPED syscall.Errno = 15014 + ERROR_EVT_FILTER_NOTELTSET syscall.Errno = 15015 + ERROR_EVT_FILTER_INVARG syscall.Errno = 15016 + ERROR_EVT_FILTER_INVTEST syscall.Errno = 15017 + ERROR_EVT_FILTER_INVTYPE syscall.Errno = 15018 + ERROR_EVT_FILTER_PARSEERR syscall.Errno = 15019 + ERROR_EVT_FILTER_UNSUPPORTEDOP syscall.Errno = 15020 + ERROR_EVT_FILTER_UNEXPECTEDTOKEN syscall.Errno = 15021 + ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL syscall.Errno = 15022 + ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE syscall.Errno = 15023 + ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE syscall.Errno = 15024 + ERROR_EVT_CHANNEL_CANNOT_ACTIVATE syscall.Errno = 15025 + ERROR_EVT_FILTER_TOO_COMPLEX syscall.Errno 
= 15026 + ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027 + ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028 + ERROR_EVT_UNRESOLVED_VALUE_INSERT syscall.Errno = 15029 + ERROR_EVT_UNRESOLVED_PARAMETER_INSERT syscall.Errno = 15030 + ERROR_EVT_MAX_INSERTS_REACHED syscall.Errno = 15031 + ERROR_EVT_EVENT_DEFINITION_NOT_FOUND syscall.Errno = 15032 + ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND syscall.Errno = 15033 + ERROR_EVT_VERSION_TOO_OLD syscall.Errno = 15034 + ERROR_EVT_VERSION_TOO_NEW syscall.Errno = 15035 + ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY syscall.Errno = 15036 + ERROR_EVT_PUBLISHER_DISABLED syscall.Errno = 15037 + ERROR_EVT_FILTER_OUT_OF_RANGE syscall.Errno = 15038 + ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE syscall.Errno = 15080 + ERROR_EC_LOG_DISABLED syscall.Errno = 15081 + ERROR_EC_CIRCULAR_FORWARDING syscall.Errno = 15082 + ERROR_EC_CREDSTORE_FULL syscall.Errno = 15083 + ERROR_EC_CRED_NOT_FOUND syscall.Errno = 15084 + ERROR_EC_NO_ACTIVE_CHANNEL syscall.Errno = 15085 + ERROR_MUI_FILE_NOT_FOUND syscall.Errno = 15100 + ERROR_MUI_INVALID_FILE syscall.Errno = 15101 + ERROR_MUI_INVALID_RC_CONFIG syscall.Errno = 15102 + ERROR_MUI_INVALID_LOCALE_NAME syscall.Errno = 15103 + ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME syscall.Errno = 15104 + ERROR_MUI_FILE_NOT_LOADED syscall.Errno = 15105 + ERROR_RESOURCE_ENUM_USER_STOP syscall.Errno = 15106 + ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED syscall.Errno = 15107 + ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME syscall.Errno = 15108 + ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE syscall.Errno = 15110 + ERROR_MRM_INVALID_PRICONFIG syscall.Errno = 15111 + ERROR_MRM_INVALID_FILE_TYPE syscall.Errno = 15112 + ERROR_MRM_UNKNOWN_QUALIFIER syscall.Errno = 15113 + ERROR_MRM_INVALID_QUALIFIER_VALUE syscall.Errno = 15114 + ERROR_MRM_NO_CANDIDATE syscall.Errno = 15115 + ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE syscall.Errno = 15116 + ERROR_MRM_RESOURCE_TYPE_MISMATCH syscall.Errno = 15117 + ERROR_MRM_DUPLICATE_MAP_NAME syscall.Errno = 15118 + ERROR_MRM_DUPLICATE_ENTRY syscall.Errno = 15119 + ERROR_MRM_INVALID_RESOURCE_IDENTIFIER syscall.Errno = 15120 + ERROR_MRM_FILEPATH_TOO_LONG syscall.Errno = 15121 + ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE syscall.Errno = 15122 + ERROR_MRM_INVALID_PRI_FILE syscall.Errno = 15126 + ERROR_MRM_NAMED_RESOURCE_NOT_FOUND syscall.Errno = 15127 + ERROR_MRM_MAP_NOT_FOUND syscall.Errno = 15135 + ERROR_MRM_UNSUPPORTED_PROFILE_TYPE syscall.Errno = 15136 + ERROR_MRM_INVALID_QUALIFIER_OPERATOR syscall.Errno = 15137 + ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE syscall.Errno = 15138 + ERROR_MRM_AUTOMERGE_ENABLED syscall.Errno = 15139 + ERROR_MRM_TOO_MANY_RESOURCES syscall.Errno = 15140 + ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE syscall.Errno = 15141 + ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE syscall.Errno = 15142 + ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD syscall.Errno = 15143 + ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST syscall.Errno = 15144 + ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT syscall.Errno = 15145 + ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE syscall.Errno = 15146 + ERROR_MRM_GENERATION_COUNT_MISMATCH syscall.Errno = 15147 + ERROR_PRI_MERGE_VERSION_MISMATCH syscall.Errno = 15148 + ERROR_PRI_MERGE_MISSING_SCHEMA syscall.Errno = 15149 + ERROR_PRI_MERGE_LOAD_FILE_FAILED syscall.Errno = 15150 + ERROR_PRI_MERGE_ADD_FILE_FAILED syscall.Errno = 15151 + ERROR_PRI_MERGE_WRITE_FILE_FAILED syscall.Errno = 15152 + ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED syscall.Errno = 15153 + 
ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED syscall.Errno = 15154 + ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED syscall.Errno = 15155 + ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED syscall.Errno = 15156 + ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED syscall.Errno = 15157 + ERROR_PRI_MERGE_INVALID_FILE_NAME syscall.Errno = 15158 + ERROR_MRM_PACKAGE_NOT_FOUND syscall.Errno = 15159 + ERROR_MCA_INVALID_CAPABILITIES_STRING syscall.Errno = 15200 + ERROR_MCA_INVALID_VCP_VERSION syscall.Errno = 15201 + ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION syscall.Errno = 15202 + ERROR_MCA_MCCS_VERSION_MISMATCH syscall.Errno = 15203 + ERROR_MCA_UNSUPPORTED_MCCS_VERSION syscall.Errno = 15204 + ERROR_MCA_INTERNAL_ERROR syscall.Errno = 15205 + ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED syscall.Errno = 15206 + ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE syscall.Errno = 15207 + ERROR_AMBIGUOUS_SYSTEM_DEVICE syscall.Errno = 15250 + ERROR_SYSTEM_DEVICE_NOT_FOUND syscall.Errno = 15299 + ERROR_HASH_NOT_SUPPORTED syscall.Errno = 15300 + ERROR_HASH_NOT_PRESENT syscall.Errno = 15301 + ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED syscall.Errno = 15321 + ERROR_GPIO_CLIENT_INFORMATION_INVALID syscall.Errno = 15322 + ERROR_GPIO_VERSION_NOT_SUPPORTED syscall.Errno = 15323 + ERROR_GPIO_INVALID_REGISTRATION_PACKET syscall.Errno = 15324 + ERROR_GPIO_OPERATION_DENIED syscall.Errno = 15325 + ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE syscall.Errno = 15326 + ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED syscall.Errno = 15327 + ERROR_CANNOT_SWITCH_RUNLEVEL syscall.Errno = 15400 + ERROR_INVALID_RUNLEVEL_SETTING syscall.Errno = 15401 + ERROR_RUNLEVEL_SWITCH_TIMEOUT syscall.Errno = 15402 + ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT syscall.Errno = 15403 + ERROR_RUNLEVEL_SWITCH_IN_PROGRESS syscall.Errno = 15404 + ERROR_SERVICES_FAILED_AUTOSTART syscall.Errno = 15405 + ERROR_COM_TASK_STOP_PENDING syscall.Errno = 15501 + ERROR_INSTALL_OPEN_PACKAGE_FAILED syscall.Errno = 15600 + ERROR_INSTALL_PACKAGE_NOT_FOUND syscall.Errno = 15601 + ERROR_INSTALL_INVALID_PACKAGE syscall.Errno = 15602 + ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED syscall.Errno = 15603 + ERROR_INSTALL_OUT_OF_DISK_SPACE syscall.Errno = 15604 + ERROR_INSTALL_NETWORK_FAILURE syscall.Errno = 15605 + ERROR_INSTALL_REGISTRATION_FAILURE syscall.Errno = 15606 + ERROR_INSTALL_DEREGISTRATION_FAILURE syscall.Errno = 15607 + ERROR_INSTALL_CANCEL syscall.Errno = 15608 + ERROR_INSTALL_FAILED syscall.Errno = 15609 + ERROR_REMOVE_FAILED syscall.Errno = 15610 + ERROR_PACKAGE_ALREADY_EXISTS syscall.Errno = 15611 + ERROR_NEEDS_REMEDIATION syscall.Errno = 15612 + ERROR_INSTALL_PREREQUISITE_FAILED syscall.Errno = 15613 + ERROR_PACKAGE_REPOSITORY_CORRUPTED syscall.Errno = 15614 + ERROR_INSTALL_POLICY_FAILURE syscall.Errno = 15615 + ERROR_PACKAGE_UPDATING syscall.Errno = 15616 + ERROR_DEPLOYMENT_BLOCKED_BY_POLICY syscall.Errno = 15617 + ERROR_PACKAGES_IN_USE syscall.Errno = 15618 + ERROR_RECOVERY_FILE_CORRUPT syscall.Errno = 15619 + ERROR_INVALID_STAGED_SIGNATURE syscall.Errno = 15620 + ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED syscall.Errno = 15621 + ERROR_INSTALL_PACKAGE_DOWNGRADE syscall.Errno = 15622 + ERROR_SYSTEM_NEEDS_REMEDIATION syscall.Errno = 15623 + ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN syscall.Errno = 15624 + ERROR_RESILIENCY_FILE_CORRUPT syscall.Errno = 15625 + ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING syscall.Errno = 15626 + ERROR_PACKAGE_MOVE_FAILED syscall.Errno = 15627 + ERROR_INSTALL_VOLUME_NOT_EMPTY syscall.Errno = 15628 + ERROR_INSTALL_VOLUME_OFFLINE syscall.Errno = 15629 + 
ERROR_INSTALL_VOLUME_CORRUPT syscall.Errno = 15630 + ERROR_NEEDS_REGISTRATION syscall.Errno = 15631 + ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE syscall.Errno = 15632 + ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED syscall.Errno = 15633 + ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE syscall.Errno = 15634 + ERROR_PACKAGE_NOT_SUPPORTED_ON_FILESYSTEM syscall.Errno = 15635 + ERROR_PACKAGE_MOVE_BLOCKED_BY_STREAMING syscall.Errno = 15636 + ERROR_INSTALL_OPTIONAL_PACKAGE_APPLICATIONID_NOT_UNIQUE syscall.Errno = 15637 + ERROR_PACKAGE_STAGING_ONHOLD syscall.Errno = 15638 + ERROR_INSTALL_INVALID_RELATED_SET_UPDATE syscall.Errno = 15639 + ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_FULLTRUST_CAPABILITY syscall.Errno = 15640 + ERROR_DEPLOYMENT_BLOCKED_BY_USER_LOG_OFF syscall.Errno = 15641 + ERROR_PROVISION_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_PROVISIONED syscall.Errno = 15642 + ERROR_PACKAGES_REPUTATION_CHECK_FAILED syscall.Errno = 15643 + ERROR_PACKAGES_REPUTATION_CHECK_TIMEDOUT syscall.Errno = 15644 + ERROR_DEPLOYMENT_OPTION_NOT_SUPPORTED syscall.Errno = 15645 + ERROR_APPINSTALLER_ACTIVATION_BLOCKED syscall.Errno = 15646 + ERROR_REGISTRATION_FROM_REMOTE_DRIVE_NOT_SUPPORTED syscall.Errno = 15647 + APPMODEL_ERROR_NO_PACKAGE syscall.Errno = 15700 + APPMODEL_ERROR_PACKAGE_RUNTIME_CORRUPT syscall.Errno = 15701 + APPMODEL_ERROR_PACKAGE_IDENTITY_CORRUPT syscall.Errno = 15702 + APPMODEL_ERROR_NO_APPLICATION syscall.Errno = 15703 + APPMODEL_ERROR_DYNAMIC_PROPERTY_READ_FAILED syscall.Errno = 15704 + APPMODEL_ERROR_DYNAMIC_PROPERTY_INVALID syscall.Errno = 15705 + APPMODEL_ERROR_PACKAGE_NOT_AVAILABLE syscall.Errno = 15706 + ERROR_STATE_LOAD_STORE_FAILED syscall.Errno = 15800 + ERROR_STATE_GET_VERSION_FAILED syscall.Errno = 15801 + ERROR_STATE_SET_VERSION_FAILED syscall.Errno = 15802 + ERROR_STATE_STRUCTURED_RESET_FAILED syscall.Errno = 15803 + ERROR_STATE_OPEN_CONTAINER_FAILED syscall.Errno = 15804 + ERROR_STATE_CREATE_CONTAINER_FAILED syscall.Errno = 15805 + ERROR_STATE_DELETE_CONTAINER_FAILED syscall.Errno = 15806 + ERROR_STATE_READ_SETTING_FAILED syscall.Errno = 15807 + ERROR_STATE_WRITE_SETTING_FAILED syscall.Errno = 15808 + ERROR_STATE_DELETE_SETTING_FAILED syscall.Errno = 15809 + ERROR_STATE_QUERY_SETTING_FAILED syscall.Errno = 15810 + ERROR_STATE_READ_COMPOSITE_SETTING_FAILED syscall.Errno = 15811 + ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED syscall.Errno = 15812 + ERROR_STATE_ENUMERATE_CONTAINER_FAILED syscall.Errno = 15813 + ERROR_STATE_ENUMERATE_SETTINGS_FAILED syscall.Errno = 15814 + ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15815 + ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15816 + ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15817 + ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15818 + ERROR_API_UNAVAILABLE syscall.Errno = 15841 + STORE_ERROR_UNLICENSED syscall.Errno = 15861 + STORE_ERROR_UNLICENSED_USER syscall.Errno = 15862 + STORE_ERROR_PENDING_COM_TRANSACTION syscall.Errno = 15863 + STORE_ERROR_LICENSE_REVOKED syscall.Errno = 15864 + SEVERITY_SUCCESS syscall.Errno = 0 + SEVERITY_ERROR syscall.Errno = 1 + FACILITY_NT_BIT = 0x10000000 + E_NOT_SET = ERROR_NOT_FOUND + E_NOT_VALID_STATE = ERROR_INVALID_STATE + E_NOT_SUFFICIENT_BUFFER = ERROR_INSUFFICIENT_BUFFER + E_TIME_CRITICAL_THREAD = ERROR_TIME_CRITICAL_THREAD + NOERROR syscall.Errno = 0 + E_UNEXPECTED Handle = 0x8000FFFF + E_NOTIMPL Handle = 0x80004001 + E_OUTOFMEMORY Handle = 0x8007000E + E_INVALIDARG Handle = 0x80070057 + E_NOINTERFACE Handle = 0x80004002 
+ E_POINTER Handle = 0x80004003 + E_HANDLE Handle = 0x80070006 + E_ABORT Handle = 0x80004004 + E_FAIL Handle = 0x80004005 + E_ACCESSDENIED Handle = 0x80070005 + E_PENDING Handle = 0x8000000A + E_BOUNDS Handle = 0x8000000B + E_CHANGED_STATE Handle = 0x8000000C + E_ILLEGAL_STATE_CHANGE Handle = 0x8000000D + E_ILLEGAL_METHOD_CALL Handle = 0x8000000E + RO_E_METADATA_NAME_NOT_FOUND Handle = 0x8000000F + RO_E_METADATA_NAME_IS_NAMESPACE Handle = 0x80000010 + RO_E_METADATA_INVALID_TYPE_FORMAT Handle = 0x80000011 + RO_E_INVALID_METADATA_FILE Handle = 0x80000012 + RO_E_CLOSED Handle = 0x80000013 + RO_E_EXCLUSIVE_WRITE Handle = 0x80000014 + RO_E_CHANGE_NOTIFICATION_IN_PROGRESS Handle = 0x80000015 + RO_E_ERROR_STRING_NOT_FOUND Handle = 0x80000016 + E_STRING_NOT_NULL_TERMINATED Handle = 0x80000017 + E_ILLEGAL_DELEGATE_ASSIGNMENT Handle = 0x80000018 + E_ASYNC_OPERATION_NOT_STARTED Handle = 0x80000019 + E_APPLICATION_EXITING Handle = 0x8000001A + E_APPLICATION_VIEW_EXITING Handle = 0x8000001B + RO_E_MUST_BE_AGILE Handle = 0x8000001C + RO_E_UNSUPPORTED_FROM_MTA Handle = 0x8000001D + RO_E_COMMITTED Handle = 0x8000001E + RO_E_BLOCKED_CROSS_ASTA_CALL Handle = 0x8000001F + RO_E_CANNOT_ACTIVATE_FULL_TRUST_SERVER Handle = 0x80000020 + RO_E_CANNOT_ACTIVATE_UNIVERSAL_APPLICATION_SERVER Handle = 0x80000021 + CO_E_INIT_TLS Handle = 0x80004006 + CO_E_INIT_SHARED_ALLOCATOR Handle = 0x80004007 + CO_E_INIT_MEMORY_ALLOCATOR Handle = 0x80004008 + CO_E_INIT_CLASS_CACHE Handle = 0x80004009 + CO_E_INIT_RPC_CHANNEL Handle = 0x8000400A + CO_E_INIT_TLS_SET_CHANNEL_CONTROL Handle = 0x8000400B + CO_E_INIT_TLS_CHANNEL_CONTROL Handle = 0x8000400C + CO_E_INIT_UNACCEPTED_USER_ALLOCATOR Handle = 0x8000400D + CO_E_INIT_SCM_MUTEX_EXISTS Handle = 0x8000400E + CO_E_INIT_SCM_FILE_MAPPING_EXISTS Handle = 0x8000400F + CO_E_INIT_SCM_MAP_VIEW_OF_FILE Handle = 0x80004010 + CO_E_INIT_SCM_EXEC_FAILURE Handle = 0x80004011 + CO_E_INIT_ONLY_SINGLE_THREADED Handle = 0x80004012 + CO_E_CANT_REMOTE Handle = 0x80004013 + CO_E_BAD_SERVER_NAME Handle = 0x80004014 + CO_E_WRONG_SERVER_IDENTITY Handle = 0x80004015 + CO_E_OLE1DDE_DISABLED Handle = 0x80004016 + CO_E_RUNAS_SYNTAX Handle = 0x80004017 + CO_E_CREATEPROCESS_FAILURE Handle = 0x80004018 + CO_E_RUNAS_CREATEPROCESS_FAILURE Handle = 0x80004019 + CO_E_RUNAS_LOGON_FAILURE Handle = 0x8000401A + CO_E_LAUNCH_PERMSSION_DENIED Handle = 0x8000401B + CO_E_START_SERVICE_FAILURE Handle = 0x8000401C + CO_E_REMOTE_COMMUNICATION_FAILURE Handle = 0x8000401D + CO_E_SERVER_START_TIMEOUT Handle = 0x8000401E + CO_E_CLSREG_INCONSISTENT Handle = 0x8000401F + CO_E_IIDREG_INCONSISTENT Handle = 0x80004020 + CO_E_NOT_SUPPORTED Handle = 0x80004021 + CO_E_RELOAD_DLL Handle = 0x80004022 + CO_E_MSI_ERROR Handle = 0x80004023 + CO_E_ATTEMPT_TO_CREATE_OUTSIDE_CLIENT_CONTEXT Handle = 0x80004024 + CO_E_SERVER_PAUSED Handle = 0x80004025 + CO_E_SERVER_NOT_PAUSED Handle = 0x80004026 + CO_E_CLASS_DISABLED Handle = 0x80004027 + CO_E_CLRNOTAVAILABLE Handle = 0x80004028 + CO_E_ASYNC_WORK_REJECTED Handle = 0x80004029 + CO_E_SERVER_INIT_TIMEOUT Handle = 0x8000402A + CO_E_NO_SECCTX_IN_ACTIVATE Handle = 0x8000402B + CO_E_TRACKER_CONFIG Handle = 0x80004030 + CO_E_THREADPOOL_CONFIG Handle = 0x80004031 + CO_E_SXS_CONFIG Handle = 0x80004032 + CO_E_MALFORMED_SPN Handle = 0x80004033 + CO_E_UNREVOKED_REGISTRATION_ON_APARTMENT_SHUTDOWN Handle = 0x80004034 + CO_E_PREMATURE_STUB_RUNDOWN Handle = 0x80004035 + S_OK Handle = 0 + S_FALSE Handle = 1 + OLE_E_FIRST Handle = 0x80040000 + OLE_E_LAST Handle = 0x800400FF + OLE_S_FIRST Handle = 0x00040000 + OLE_S_LAST 
Handle = 0x000400FF + OLE_E_OLEVERB Handle = 0x80040000 + OLE_E_ADVF Handle = 0x80040001 + OLE_E_ENUM_NOMORE Handle = 0x80040002 + OLE_E_ADVISENOTSUPPORTED Handle = 0x80040003 + OLE_E_NOCONNECTION Handle = 0x80040004 + OLE_E_NOTRUNNING Handle = 0x80040005 + OLE_E_NOCACHE Handle = 0x80040006 + OLE_E_BLANK Handle = 0x80040007 + OLE_E_CLASSDIFF Handle = 0x80040008 + OLE_E_CANT_GETMONIKER Handle = 0x80040009 + OLE_E_CANT_BINDTOSOURCE Handle = 0x8004000A + OLE_E_STATIC Handle = 0x8004000B + OLE_E_PROMPTSAVECANCELLED Handle = 0x8004000C + OLE_E_INVALIDRECT Handle = 0x8004000D + OLE_E_WRONGCOMPOBJ Handle = 0x8004000E + OLE_E_INVALIDHWND Handle = 0x8004000F + OLE_E_NOT_INPLACEACTIVE Handle = 0x80040010 + OLE_E_CANTCONVERT Handle = 0x80040011 + OLE_E_NOSTORAGE Handle = 0x80040012 + DV_E_FORMATETC Handle = 0x80040064 + DV_E_DVTARGETDEVICE Handle = 0x80040065 + DV_E_STGMEDIUM Handle = 0x80040066 + DV_E_STATDATA Handle = 0x80040067 + DV_E_LINDEX Handle = 0x80040068 + DV_E_TYMED Handle = 0x80040069 + DV_E_CLIPFORMAT Handle = 0x8004006A + DV_E_DVASPECT Handle = 0x8004006B + DV_E_DVTARGETDEVICE_SIZE Handle = 0x8004006C + DV_E_NOIVIEWOBJECT Handle = 0x8004006D + DRAGDROP_E_FIRST syscall.Errno = 0x80040100 + DRAGDROP_E_LAST syscall.Errno = 0x8004010F + DRAGDROP_S_FIRST syscall.Errno = 0x00040100 + DRAGDROP_S_LAST syscall.Errno = 0x0004010F + DRAGDROP_E_NOTREGISTERED Handle = 0x80040100 + DRAGDROP_E_ALREADYREGISTERED Handle = 0x80040101 + DRAGDROP_E_INVALIDHWND Handle = 0x80040102 + DRAGDROP_E_CONCURRENT_DRAG_ATTEMPTED Handle = 0x80040103 + CLASSFACTORY_E_FIRST syscall.Errno = 0x80040110 + CLASSFACTORY_E_LAST syscall.Errno = 0x8004011F + CLASSFACTORY_S_FIRST syscall.Errno = 0x00040110 + CLASSFACTORY_S_LAST syscall.Errno = 0x0004011F + CLASS_E_NOAGGREGATION Handle = 0x80040110 + CLASS_E_CLASSNOTAVAILABLE Handle = 0x80040111 + CLASS_E_NOTLICENSED Handle = 0x80040112 + MARSHAL_E_FIRST syscall.Errno = 0x80040120 + MARSHAL_E_LAST syscall.Errno = 0x8004012F + MARSHAL_S_FIRST syscall.Errno = 0x00040120 + MARSHAL_S_LAST syscall.Errno = 0x0004012F + DATA_E_FIRST syscall.Errno = 0x80040130 + DATA_E_LAST syscall.Errno = 0x8004013F + DATA_S_FIRST syscall.Errno = 0x00040130 + DATA_S_LAST syscall.Errno = 0x0004013F + VIEW_E_FIRST syscall.Errno = 0x80040140 + VIEW_E_LAST syscall.Errno = 0x8004014F + VIEW_S_FIRST syscall.Errno = 0x00040140 + VIEW_S_LAST syscall.Errno = 0x0004014F + VIEW_E_DRAW Handle = 0x80040140 + REGDB_E_FIRST syscall.Errno = 0x80040150 + REGDB_E_LAST syscall.Errno = 0x8004015F + REGDB_S_FIRST syscall.Errno = 0x00040150 + REGDB_S_LAST syscall.Errno = 0x0004015F + REGDB_E_READREGDB Handle = 0x80040150 + REGDB_E_WRITEREGDB Handle = 0x80040151 + REGDB_E_KEYMISSING Handle = 0x80040152 + REGDB_E_INVALIDVALUE Handle = 0x80040153 + REGDB_E_CLASSNOTREG Handle = 0x80040154 + REGDB_E_IIDNOTREG Handle = 0x80040155 + REGDB_E_BADTHREADINGMODEL Handle = 0x80040156 + REGDB_E_PACKAGEPOLICYVIOLATION Handle = 0x80040157 + CAT_E_FIRST syscall.Errno = 0x80040160 + CAT_E_LAST syscall.Errno = 0x80040161 + CAT_E_CATIDNOEXIST Handle = 0x80040160 + CAT_E_NODESCRIPTION Handle = 0x80040161 + CS_E_FIRST syscall.Errno = 0x80040164 + CS_E_LAST syscall.Errno = 0x8004016F + CS_E_PACKAGE_NOTFOUND Handle = 0x80040164 + CS_E_NOT_DELETABLE Handle = 0x80040165 + CS_E_CLASS_NOTFOUND Handle = 0x80040166 + CS_E_INVALID_VERSION Handle = 0x80040167 + CS_E_NO_CLASSSTORE Handle = 0x80040168 + CS_E_OBJECT_NOTFOUND Handle = 0x80040169 + CS_E_OBJECT_ALREADY_EXISTS Handle = 0x8004016A + CS_E_INVALID_PATH Handle = 0x8004016B + CS_E_NETWORK_ERROR Handle 
= 0x8004016C + CS_E_ADMIN_LIMIT_EXCEEDED Handle = 0x8004016D + CS_E_SCHEMA_MISMATCH Handle = 0x8004016E + CS_E_INTERNAL_ERROR Handle = 0x8004016F + CACHE_E_FIRST syscall.Errno = 0x80040170 + CACHE_E_LAST syscall.Errno = 0x8004017F + CACHE_S_FIRST syscall.Errno = 0x00040170 + CACHE_S_LAST syscall.Errno = 0x0004017F + CACHE_E_NOCACHE_UPDATED Handle = 0x80040170 + OLEOBJ_E_FIRST syscall.Errno = 0x80040180 + OLEOBJ_E_LAST syscall.Errno = 0x8004018F + OLEOBJ_S_FIRST syscall.Errno = 0x00040180 + OLEOBJ_S_LAST syscall.Errno = 0x0004018F + OLEOBJ_E_NOVERBS Handle = 0x80040180 + OLEOBJ_E_INVALIDVERB Handle = 0x80040181 + CLIENTSITE_E_FIRST syscall.Errno = 0x80040190 + CLIENTSITE_E_LAST syscall.Errno = 0x8004019F + CLIENTSITE_S_FIRST syscall.Errno = 0x00040190 + CLIENTSITE_S_LAST syscall.Errno = 0x0004019F + INPLACE_E_NOTUNDOABLE Handle = 0x800401A0 + INPLACE_E_NOTOOLSPACE Handle = 0x800401A1 + INPLACE_E_FIRST syscall.Errno = 0x800401A0 + INPLACE_E_LAST syscall.Errno = 0x800401AF + INPLACE_S_FIRST syscall.Errno = 0x000401A0 + INPLACE_S_LAST syscall.Errno = 0x000401AF + ENUM_E_FIRST syscall.Errno = 0x800401B0 + ENUM_E_LAST syscall.Errno = 0x800401BF + ENUM_S_FIRST syscall.Errno = 0x000401B0 + ENUM_S_LAST syscall.Errno = 0x000401BF + CONVERT10_E_FIRST syscall.Errno = 0x800401C0 + CONVERT10_E_LAST syscall.Errno = 0x800401CF + CONVERT10_S_FIRST syscall.Errno = 0x000401C0 + CONVERT10_S_LAST syscall.Errno = 0x000401CF + CONVERT10_E_OLESTREAM_GET Handle = 0x800401C0 + CONVERT10_E_OLESTREAM_PUT Handle = 0x800401C1 + CONVERT10_E_OLESTREAM_FMT Handle = 0x800401C2 + CONVERT10_E_OLESTREAM_BITMAP_TO_DIB Handle = 0x800401C3 + CONVERT10_E_STG_FMT Handle = 0x800401C4 + CONVERT10_E_STG_NO_STD_STREAM Handle = 0x800401C5 + CONVERT10_E_STG_DIB_TO_BITMAP Handle = 0x800401C6 + CLIPBRD_E_FIRST syscall.Errno = 0x800401D0 + CLIPBRD_E_LAST syscall.Errno = 0x800401DF + CLIPBRD_S_FIRST syscall.Errno = 0x000401D0 + CLIPBRD_S_LAST syscall.Errno = 0x000401DF + CLIPBRD_E_CANT_OPEN Handle = 0x800401D0 + CLIPBRD_E_CANT_EMPTY Handle = 0x800401D1 + CLIPBRD_E_CANT_SET Handle = 0x800401D2 + CLIPBRD_E_BAD_DATA Handle = 0x800401D3 + CLIPBRD_E_CANT_CLOSE Handle = 0x800401D4 + MK_E_FIRST syscall.Errno = 0x800401E0 + MK_E_LAST syscall.Errno = 0x800401EF + MK_S_FIRST syscall.Errno = 0x000401E0 + MK_S_LAST syscall.Errno = 0x000401EF + MK_E_CONNECTMANUALLY Handle = 0x800401E0 + MK_E_EXCEEDEDDEADLINE Handle = 0x800401E1 + MK_E_NEEDGENERIC Handle = 0x800401E2 + MK_E_UNAVAILABLE Handle = 0x800401E3 + MK_E_SYNTAX Handle = 0x800401E4 + MK_E_NOOBJECT Handle = 0x800401E5 + MK_E_INVALIDEXTENSION Handle = 0x800401E6 + MK_E_INTERMEDIATEINTERFACENOTSUPPORTED Handle = 0x800401E7 + MK_E_NOTBINDABLE Handle = 0x800401E8 + MK_E_NOTBOUND Handle = 0x800401E9 + MK_E_CANTOPENFILE Handle = 0x800401EA + MK_E_MUSTBOTHERUSER Handle = 0x800401EB + MK_E_NOINVERSE Handle = 0x800401EC + MK_E_NOSTORAGE Handle = 0x800401ED + MK_E_NOPREFIX Handle = 0x800401EE + MK_E_ENUMERATION_FAILED Handle = 0x800401EF + CO_E_FIRST syscall.Errno = 0x800401F0 + CO_E_LAST syscall.Errno = 0x800401FF + CO_S_FIRST syscall.Errno = 0x000401F0 + CO_S_LAST syscall.Errno = 0x000401FF + CO_E_NOTINITIALIZED Handle = 0x800401F0 + CO_E_ALREADYINITIALIZED Handle = 0x800401F1 + CO_E_CANTDETERMINECLASS Handle = 0x800401F2 + CO_E_CLASSSTRING Handle = 0x800401F3 + CO_E_IIDSTRING Handle = 0x800401F4 + CO_E_APPNOTFOUND Handle = 0x800401F5 + CO_E_APPSINGLEUSE Handle = 0x800401F6 + CO_E_ERRORINAPP Handle = 0x800401F7 + CO_E_DLLNOTFOUND Handle = 0x800401F8 + CO_E_ERRORINDLL Handle = 0x800401F9 + 
CO_E_WRONGOSFORAPP Handle = 0x800401FA + CO_E_OBJNOTREG Handle = 0x800401FB + CO_E_OBJISREG Handle = 0x800401FC + CO_E_OBJNOTCONNECTED Handle = 0x800401FD + CO_E_APPDIDNTREG Handle = 0x800401FE + CO_E_RELEASED Handle = 0x800401FF + EVENT_E_FIRST syscall.Errno = 0x80040200 + EVENT_E_LAST syscall.Errno = 0x8004021F + EVENT_S_FIRST syscall.Errno = 0x00040200 + EVENT_S_LAST syscall.Errno = 0x0004021F + EVENT_S_SOME_SUBSCRIBERS_FAILED Handle = 0x00040200 + EVENT_E_ALL_SUBSCRIBERS_FAILED Handle = 0x80040201 + EVENT_S_NOSUBSCRIBERS Handle = 0x00040202 + EVENT_E_QUERYSYNTAX Handle = 0x80040203 + EVENT_E_QUERYFIELD Handle = 0x80040204 + EVENT_E_INTERNALEXCEPTION Handle = 0x80040205 + EVENT_E_INTERNALERROR Handle = 0x80040206 + EVENT_E_INVALID_PER_USER_SID Handle = 0x80040207 + EVENT_E_USER_EXCEPTION Handle = 0x80040208 + EVENT_E_TOO_MANY_METHODS Handle = 0x80040209 + EVENT_E_MISSING_EVENTCLASS Handle = 0x8004020A + EVENT_E_NOT_ALL_REMOVED Handle = 0x8004020B + EVENT_E_COMPLUS_NOT_INSTALLED Handle = 0x8004020C + EVENT_E_CANT_MODIFY_OR_DELETE_UNCONFIGURED_OBJECT Handle = 0x8004020D + EVENT_E_CANT_MODIFY_OR_DELETE_CONFIGURED_OBJECT Handle = 0x8004020E + EVENT_E_INVALID_EVENT_CLASS_PARTITION Handle = 0x8004020F + EVENT_E_PER_USER_SID_NOT_LOGGED_ON Handle = 0x80040210 + TPC_E_INVALID_PROPERTY Handle = 0x80040241 + TPC_E_NO_DEFAULT_TABLET Handle = 0x80040212 + TPC_E_UNKNOWN_PROPERTY Handle = 0x8004021B + TPC_E_INVALID_INPUT_RECT Handle = 0x80040219 + TPC_E_INVALID_STROKE Handle = 0x80040222 + TPC_E_INITIALIZE_FAIL Handle = 0x80040223 + TPC_E_NOT_RELEVANT Handle = 0x80040232 + TPC_E_INVALID_PACKET_DESCRIPTION Handle = 0x80040233 + TPC_E_RECOGNIZER_NOT_REGISTERED Handle = 0x80040235 + TPC_E_INVALID_RIGHTS Handle = 0x80040236 + TPC_E_OUT_OF_ORDER_CALL Handle = 0x80040237 + TPC_E_QUEUE_FULL Handle = 0x80040238 + TPC_E_INVALID_CONFIGURATION Handle = 0x80040239 + TPC_E_INVALID_DATA_FROM_RECOGNIZER Handle = 0x8004023A + TPC_S_TRUNCATED Handle = 0x00040252 + TPC_S_INTERRUPTED Handle = 0x00040253 + TPC_S_NO_DATA_TO_PROCESS Handle = 0x00040254 + XACT_E_FIRST syscall.Errno = 0x8004D000 + XACT_E_LAST syscall.Errno = 0x8004D02B + XACT_S_FIRST syscall.Errno = 0x0004D000 + XACT_S_LAST syscall.Errno = 0x0004D010 + XACT_E_ALREADYOTHERSINGLEPHASE Handle = 0x8004D000 + XACT_E_CANTRETAIN Handle = 0x8004D001 + XACT_E_COMMITFAILED Handle = 0x8004D002 + XACT_E_COMMITPREVENTED Handle = 0x8004D003 + XACT_E_HEURISTICABORT Handle = 0x8004D004 + XACT_E_HEURISTICCOMMIT Handle = 0x8004D005 + XACT_E_HEURISTICDAMAGE Handle = 0x8004D006 + XACT_E_HEURISTICDANGER Handle = 0x8004D007 + XACT_E_ISOLATIONLEVEL Handle = 0x8004D008 + XACT_E_NOASYNC Handle = 0x8004D009 + XACT_E_NOENLIST Handle = 0x8004D00A + XACT_E_NOISORETAIN Handle = 0x8004D00B + XACT_E_NORESOURCE Handle = 0x8004D00C + XACT_E_NOTCURRENT Handle = 0x8004D00D + XACT_E_NOTRANSACTION Handle = 0x8004D00E + XACT_E_NOTSUPPORTED Handle = 0x8004D00F + XACT_E_UNKNOWNRMGRID Handle = 0x8004D010 + XACT_E_WRONGSTATE Handle = 0x8004D011 + XACT_E_WRONGUOW Handle = 0x8004D012 + XACT_E_XTIONEXISTS Handle = 0x8004D013 + XACT_E_NOIMPORTOBJECT Handle = 0x8004D014 + XACT_E_INVALIDCOOKIE Handle = 0x8004D015 + XACT_E_INDOUBT Handle = 0x8004D016 + XACT_E_NOTIMEOUT Handle = 0x8004D017 + XACT_E_ALREADYINPROGRESS Handle = 0x8004D018 + XACT_E_ABORTED Handle = 0x8004D019 + XACT_E_LOGFULL Handle = 0x8004D01A + XACT_E_TMNOTAVAILABLE Handle = 0x8004D01B + XACT_E_CONNECTION_DOWN Handle = 0x8004D01C + XACT_E_CONNECTION_DENIED Handle = 0x8004D01D + XACT_E_REENLISTTIMEOUT Handle = 0x8004D01E + 
XACT_E_TIP_CONNECT_FAILED Handle = 0x8004D01F + XACT_E_TIP_PROTOCOL_ERROR Handle = 0x8004D020 + XACT_E_TIP_PULL_FAILED Handle = 0x8004D021 + XACT_E_DEST_TMNOTAVAILABLE Handle = 0x8004D022 + XACT_E_TIP_DISABLED Handle = 0x8004D023 + XACT_E_NETWORK_TX_DISABLED Handle = 0x8004D024 + XACT_E_PARTNER_NETWORK_TX_DISABLED Handle = 0x8004D025 + XACT_E_XA_TX_DISABLED Handle = 0x8004D026 + XACT_E_UNABLE_TO_READ_DTC_CONFIG Handle = 0x8004D027 + XACT_E_UNABLE_TO_LOAD_DTC_PROXY Handle = 0x8004D028 + XACT_E_ABORTING Handle = 0x8004D029 + XACT_E_PUSH_COMM_FAILURE Handle = 0x8004D02A + XACT_E_PULL_COMM_FAILURE Handle = 0x8004D02B + XACT_E_LU_TX_DISABLED Handle = 0x8004D02C + XACT_E_CLERKNOTFOUND Handle = 0x8004D080 + XACT_E_CLERKEXISTS Handle = 0x8004D081 + XACT_E_RECOVERYINPROGRESS Handle = 0x8004D082 + XACT_E_TRANSACTIONCLOSED Handle = 0x8004D083 + XACT_E_INVALIDLSN Handle = 0x8004D084 + XACT_E_REPLAYREQUEST Handle = 0x8004D085 + XACT_S_ASYNC Handle = 0x0004D000 + XACT_S_DEFECT Handle = 0x0004D001 + XACT_S_READONLY Handle = 0x0004D002 + XACT_S_SOMENORETAIN Handle = 0x0004D003 + XACT_S_OKINFORM Handle = 0x0004D004 + XACT_S_MADECHANGESCONTENT Handle = 0x0004D005 + XACT_S_MADECHANGESINFORM Handle = 0x0004D006 + XACT_S_ALLNORETAIN Handle = 0x0004D007 + XACT_S_ABORTING Handle = 0x0004D008 + XACT_S_SINGLEPHASE Handle = 0x0004D009 + XACT_S_LOCALLY_OK Handle = 0x0004D00A + XACT_S_LASTRESOURCEMANAGER Handle = 0x0004D010 + CONTEXT_E_FIRST syscall.Errno = 0x8004E000 + CONTEXT_E_LAST syscall.Errno = 0x8004E02F + CONTEXT_S_FIRST syscall.Errno = 0x0004E000 + CONTEXT_S_LAST syscall.Errno = 0x0004E02F + CONTEXT_E_ABORTED Handle = 0x8004E002 + CONTEXT_E_ABORTING Handle = 0x8004E003 + CONTEXT_E_NOCONTEXT Handle = 0x8004E004 + CONTEXT_E_WOULD_DEADLOCK Handle = 0x8004E005 + CONTEXT_E_SYNCH_TIMEOUT Handle = 0x8004E006 + CONTEXT_E_OLDREF Handle = 0x8004E007 + CONTEXT_E_ROLENOTFOUND Handle = 0x8004E00C + CONTEXT_E_TMNOTAVAILABLE Handle = 0x8004E00F + CO_E_ACTIVATIONFAILED Handle = 0x8004E021 + CO_E_ACTIVATIONFAILED_EVENTLOGGED Handle = 0x8004E022 + CO_E_ACTIVATIONFAILED_CATALOGERROR Handle = 0x8004E023 + CO_E_ACTIVATIONFAILED_TIMEOUT Handle = 0x8004E024 + CO_E_INITIALIZATIONFAILED Handle = 0x8004E025 + CONTEXT_E_NOJIT Handle = 0x8004E026 + CONTEXT_E_NOTRANSACTION Handle = 0x8004E027 + CO_E_THREADINGMODEL_CHANGED Handle = 0x8004E028 + CO_E_NOIISINTRINSICS Handle = 0x8004E029 + CO_E_NOCOOKIES Handle = 0x8004E02A + CO_E_DBERROR Handle = 0x8004E02B + CO_E_NOTPOOLED Handle = 0x8004E02C + CO_E_NOTCONSTRUCTED Handle = 0x8004E02D + CO_E_NOSYNCHRONIZATION Handle = 0x8004E02E + CO_E_ISOLEVELMISMATCH Handle = 0x8004E02F + CO_E_CALL_OUT_OF_TX_SCOPE_NOT_ALLOWED Handle = 0x8004E030 + CO_E_EXIT_TRANSACTION_SCOPE_NOT_CALLED Handle = 0x8004E031 + OLE_S_USEREG Handle = 0x00040000 + OLE_S_STATIC Handle = 0x00040001 + OLE_S_MAC_CLIPFORMAT Handle = 0x00040002 + DRAGDROP_S_DROP Handle = 0x00040100 + DRAGDROP_S_CANCEL Handle = 0x00040101 + DRAGDROP_S_USEDEFAULTCURSORS Handle = 0x00040102 + DATA_S_SAMEFORMATETC Handle = 0x00040130 + VIEW_S_ALREADY_FROZEN Handle = 0x00040140 + CACHE_S_FORMATETC_NOTSUPPORTED Handle = 0x00040170 + CACHE_S_SAMECACHE Handle = 0x00040171 + CACHE_S_SOMECACHES_NOTUPDATED Handle = 0x00040172 + OLEOBJ_S_INVALIDVERB Handle = 0x00040180 + OLEOBJ_S_CANNOT_DOVERB_NOW Handle = 0x00040181 + OLEOBJ_S_INVALIDHWND Handle = 0x00040182 + INPLACE_S_TRUNCATED Handle = 0x000401A0 + CONVERT10_S_NO_PRESENTATION Handle = 0x000401C0 + MK_S_REDUCED_TO_SELF Handle = 0x000401E2 + MK_S_ME Handle = 0x000401E4 + MK_S_HIM Handle = 0x000401E5 + MK_S_US 
Handle = 0x000401E6 + MK_S_MONIKERALREADYREGISTERED Handle = 0x000401E7 + SCHED_S_TASK_READY Handle = 0x00041300 + SCHED_S_TASK_RUNNING Handle = 0x00041301 + SCHED_S_TASK_DISABLED Handle = 0x00041302 + SCHED_S_TASK_HAS_NOT_RUN Handle = 0x00041303 + SCHED_S_TASK_NO_MORE_RUNS Handle = 0x00041304 + SCHED_S_TASK_NOT_SCHEDULED Handle = 0x00041305 + SCHED_S_TASK_TERMINATED Handle = 0x00041306 + SCHED_S_TASK_NO_VALID_TRIGGERS Handle = 0x00041307 + SCHED_S_EVENT_TRIGGER Handle = 0x00041308 + SCHED_E_TRIGGER_NOT_FOUND Handle = 0x80041309 + SCHED_E_TASK_NOT_READY Handle = 0x8004130A + SCHED_E_TASK_NOT_RUNNING Handle = 0x8004130B + SCHED_E_SERVICE_NOT_INSTALLED Handle = 0x8004130C + SCHED_E_CANNOT_OPEN_TASK Handle = 0x8004130D + SCHED_E_INVALID_TASK Handle = 0x8004130E + SCHED_E_ACCOUNT_INFORMATION_NOT_SET Handle = 0x8004130F + SCHED_E_ACCOUNT_NAME_NOT_FOUND Handle = 0x80041310 + SCHED_E_ACCOUNT_DBASE_CORRUPT Handle = 0x80041311 + SCHED_E_NO_SECURITY_SERVICES Handle = 0x80041312 + SCHED_E_UNKNOWN_OBJECT_VERSION Handle = 0x80041313 + SCHED_E_UNSUPPORTED_ACCOUNT_OPTION Handle = 0x80041314 + SCHED_E_SERVICE_NOT_RUNNING Handle = 0x80041315 + SCHED_E_UNEXPECTEDNODE Handle = 0x80041316 + SCHED_E_NAMESPACE Handle = 0x80041317 + SCHED_E_INVALIDVALUE Handle = 0x80041318 + SCHED_E_MISSINGNODE Handle = 0x80041319 + SCHED_E_MALFORMEDXML Handle = 0x8004131A + SCHED_S_SOME_TRIGGERS_FAILED Handle = 0x0004131B + SCHED_S_BATCH_LOGON_PROBLEM Handle = 0x0004131C + SCHED_E_TOO_MANY_NODES Handle = 0x8004131D + SCHED_E_PAST_END_BOUNDARY Handle = 0x8004131E + SCHED_E_ALREADY_RUNNING Handle = 0x8004131F + SCHED_E_USER_NOT_LOGGED_ON Handle = 0x80041320 + SCHED_E_INVALID_TASK_HASH Handle = 0x80041321 + SCHED_E_SERVICE_NOT_AVAILABLE Handle = 0x80041322 + SCHED_E_SERVICE_TOO_BUSY Handle = 0x80041323 + SCHED_E_TASK_ATTEMPTED Handle = 0x80041324 + SCHED_S_TASK_QUEUED Handle = 0x00041325 + SCHED_E_TASK_DISABLED Handle = 0x80041326 + SCHED_E_TASK_NOT_V1_COMPAT Handle = 0x80041327 + SCHED_E_START_ON_DEMAND Handle = 0x80041328 + SCHED_E_TASK_NOT_UBPM_COMPAT Handle = 0x80041329 + SCHED_E_DEPRECATED_FEATURE_USED Handle = 0x80041330 + CO_E_CLASS_CREATE_FAILED Handle = 0x80080001 + CO_E_SCM_ERROR Handle = 0x80080002 + CO_E_SCM_RPC_FAILURE Handle = 0x80080003 + CO_E_BAD_PATH Handle = 0x80080004 + CO_E_SERVER_EXEC_FAILURE Handle = 0x80080005 + CO_E_OBJSRV_RPC_FAILURE Handle = 0x80080006 + MK_E_NO_NORMALIZED Handle = 0x80080007 + CO_E_SERVER_STOPPING Handle = 0x80080008 + MEM_E_INVALID_ROOT Handle = 0x80080009 + MEM_E_INVALID_LINK Handle = 0x80080010 + MEM_E_INVALID_SIZE Handle = 0x80080011 + CO_S_NOTALLINTERFACES Handle = 0x00080012 + CO_S_MACHINENAMENOTFOUND Handle = 0x00080013 + CO_E_MISSING_DISPLAYNAME Handle = 0x80080015 + CO_E_RUNAS_VALUE_MUST_BE_AAA Handle = 0x80080016 + CO_E_ELEVATION_DISABLED Handle = 0x80080017 + APPX_E_PACKAGING_INTERNAL Handle = 0x80080200 + APPX_E_INTERLEAVING_NOT_ALLOWED Handle = 0x80080201 + APPX_E_RELATIONSHIPS_NOT_ALLOWED Handle = 0x80080202 + APPX_E_MISSING_REQUIRED_FILE Handle = 0x80080203 + APPX_E_INVALID_MANIFEST Handle = 0x80080204 + APPX_E_INVALID_BLOCKMAP Handle = 0x80080205 + APPX_E_CORRUPT_CONTENT Handle = 0x80080206 + APPX_E_BLOCK_HASH_INVALID Handle = 0x80080207 + APPX_E_REQUESTED_RANGE_TOO_LARGE Handle = 0x80080208 + APPX_E_INVALID_SIP_CLIENT_DATA Handle = 0x80080209 + APPX_E_INVALID_KEY_INFO Handle = 0x8008020A + APPX_E_INVALID_CONTENTGROUPMAP Handle = 0x8008020B + APPX_E_INVALID_APPINSTALLER Handle = 0x8008020C + APPX_E_DELTA_BASELINE_VERSION_MISMATCH Handle = 0x8008020D + 
APPX_E_DELTA_PACKAGE_MISSING_FILE Handle = 0x8008020E + APPX_E_INVALID_DELTA_PACKAGE Handle = 0x8008020F + APPX_E_DELTA_APPENDED_PACKAGE_NOT_ALLOWED Handle = 0x80080210 + APPX_E_INVALID_PACKAGING_LAYOUT Handle = 0x80080211 + APPX_E_INVALID_PACKAGESIGNCONFIG Handle = 0x80080212 + APPX_E_RESOURCESPRI_NOT_ALLOWED Handle = 0x80080213 + APPX_E_FILE_COMPRESSION_MISMATCH Handle = 0x80080214 + APPX_E_INVALID_PAYLOAD_PACKAGE_EXTENSION Handle = 0x80080215 + APPX_E_INVALID_ENCRYPTION_EXCLUSION_FILE_LIST Handle = 0x80080216 + BT_E_SPURIOUS_ACTIVATION Handle = 0x80080300 + DISP_E_UNKNOWNINTERFACE Handle = 0x80020001 + DISP_E_MEMBERNOTFOUND Handle = 0x80020003 + DISP_E_PARAMNOTFOUND Handle = 0x80020004 + DISP_E_TYPEMISMATCH Handle = 0x80020005 + DISP_E_UNKNOWNNAME Handle = 0x80020006 + DISP_E_NONAMEDARGS Handle = 0x80020007 + DISP_E_BADVARTYPE Handle = 0x80020008 + DISP_E_EXCEPTION Handle = 0x80020009 + DISP_E_OVERFLOW Handle = 0x8002000A + DISP_E_BADINDEX Handle = 0x8002000B + DISP_E_UNKNOWNLCID Handle = 0x8002000C + DISP_E_ARRAYISLOCKED Handle = 0x8002000D + DISP_E_BADPARAMCOUNT Handle = 0x8002000E + DISP_E_PARAMNOTOPTIONAL Handle = 0x8002000F + DISP_E_BADCALLEE Handle = 0x80020010 + DISP_E_NOTACOLLECTION Handle = 0x80020011 + DISP_E_DIVBYZERO Handle = 0x80020012 + DISP_E_BUFFERTOOSMALL Handle = 0x80020013 + TYPE_E_BUFFERTOOSMALL Handle = 0x80028016 + TYPE_E_FIELDNOTFOUND Handle = 0x80028017 + TYPE_E_INVDATAREAD Handle = 0x80028018 + TYPE_E_UNSUPFORMAT Handle = 0x80028019 + TYPE_E_REGISTRYACCESS Handle = 0x8002801C + TYPE_E_LIBNOTREGISTERED Handle = 0x8002801D + TYPE_E_UNDEFINEDTYPE Handle = 0x80028027 + TYPE_E_QUALIFIEDNAMEDISALLOWED Handle = 0x80028028 + TYPE_E_INVALIDSTATE Handle = 0x80028029 + TYPE_E_WRONGTYPEKIND Handle = 0x8002802A + TYPE_E_ELEMENTNOTFOUND Handle = 0x8002802B + TYPE_E_AMBIGUOUSNAME Handle = 0x8002802C + TYPE_E_NAMECONFLICT Handle = 0x8002802D + TYPE_E_UNKNOWNLCID Handle = 0x8002802E + TYPE_E_DLLFUNCTIONNOTFOUND Handle = 0x8002802F + TYPE_E_BADMODULEKIND Handle = 0x800288BD + TYPE_E_SIZETOOBIG Handle = 0x800288C5 + TYPE_E_DUPLICATEID Handle = 0x800288C6 + TYPE_E_INVALIDID Handle = 0x800288CF + TYPE_E_TYPEMISMATCH Handle = 0x80028CA0 + TYPE_E_OUTOFBOUNDS Handle = 0x80028CA1 + TYPE_E_IOERROR Handle = 0x80028CA2 + TYPE_E_CANTCREATETMPFILE Handle = 0x80028CA3 + TYPE_E_CANTLOADLIBRARY Handle = 0x80029C4A + TYPE_E_INCONSISTENTPROPFUNCS Handle = 0x80029C83 + TYPE_E_CIRCULARTYPE Handle = 0x80029C84 + STG_E_INVALIDFUNCTION Handle = 0x80030001 + STG_E_FILENOTFOUND Handle = 0x80030002 + STG_E_PATHNOTFOUND Handle = 0x80030003 + STG_E_TOOMANYOPENFILES Handle = 0x80030004 + STG_E_ACCESSDENIED Handle = 0x80030005 + STG_E_INVALIDHANDLE Handle = 0x80030006 + STG_E_INSUFFICIENTMEMORY Handle = 0x80030008 + STG_E_INVALIDPOINTER Handle = 0x80030009 + STG_E_NOMOREFILES Handle = 0x80030012 + STG_E_DISKISWRITEPROTECTED Handle = 0x80030013 + STG_E_SEEKERROR Handle = 0x80030019 + STG_E_WRITEFAULT Handle = 0x8003001D + STG_E_READFAULT Handle = 0x8003001E + STG_E_SHAREVIOLATION Handle = 0x80030020 + STG_E_LOCKVIOLATION Handle = 0x80030021 + STG_E_FILEALREADYEXISTS Handle = 0x80030050 + STG_E_INVALIDPARAMETER Handle = 0x80030057 + STG_E_MEDIUMFULL Handle = 0x80030070 + STG_E_PROPSETMISMATCHED Handle = 0x800300F0 + STG_E_ABNORMALAPIEXIT Handle = 0x800300FA + STG_E_INVALIDHEADER Handle = 0x800300FB + STG_E_INVALIDNAME Handle = 0x800300FC + STG_E_UNKNOWN Handle = 0x800300FD + STG_E_UNIMPLEMENTEDFUNCTION Handle = 0x800300FE + STG_E_INVALIDFLAG Handle = 0x800300FF + STG_E_INUSE Handle = 0x80030100 + 
STG_E_NOTCURRENT Handle = 0x80030101 + STG_E_REVERTED Handle = 0x80030102 + STG_E_CANTSAVE Handle = 0x80030103 + STG_E_OLDFORMAT Handle = 0x80030104 + STG_E_OLDDLL Handle = 0x80030105 + STG_E_SHAREREQUIRED Handle = 0x80030106 + STG_E_NOTFILEBASEDSTORAGE Handle = 0x80030107 + STG_E_EXTANTMARSHALLINGS Handle = 0x80030108 + STG_E_DOCFILECORRUPT Handle = 0x80030109 + STG_E_BADBASEADDRESS Handle = 0x80030110 + STG_E_DOCFILETOOLARGE Handle = 0x80030111 + STG_E_NOTSIMPLEFORMAT Handle = 0x80030112 + STG_E_INCOMPLETE Handle = 0x80030201 + STG_E_TERMINATED Handle = 0x80030202 + STG_S_CONVERTED Handle = 0x00030200 + STG_S_BLOCK Handle = 0x00030201 + STG_S_RETRYNOW Handle = 0x00030202 + STG_S_MONITORING Handle = 0x00030203 + STG_S_MULTIPLEOPENS Handle = 0x00030204 + STG_S_CONSOLIDATIONFAILED Handle = 0x00030205 + STG_S_CANNOTCONSOLIDATE Handle = 0x00030206 + STG_S_POWER_CYCLE_REQUIRED Handle = 0x00030207 + STG_E_FIRMWARE_SLOT_INVALID Handle = 0x80030208 + STG_E_FIRMWARE_IMAGE_INVALID Handle = 0x80030209 + STG_E_DEVICE_UNRESPONSIVE Handle = 0x8003020A + STG_E_STATUS_COPY_PROTECTION_FAILURE Handle = 0x80030305 + STG_E_CSS_AUTHENTICATION_FAILURE Handle = 0x80030306 + STG_E_CSS_KEY_NOT_PRESENT Handle = 0x80030307 + STG_E_CSS_KEY_NOT_ESTABLISHED Handle = 0x80030308 + STG_E_CSS_SCRAMBLED_SECTOR Handle = 0x80030309 + STG_E_CSS_REGION_MISMATCH Handle = 0x8003030A + STG_E_RESETS_EXHAUSTED Handle = 0x8003030B + RPC_E_CALL_REJECTED Handle = 0x80010001 + RPC_E_CALL_CANCELED Handle = 0x80010002 + RPC_E_CANTPOST_INSENDCALL Handle = 0x80010003 + RPC_E_CANTCALLOUT_INASYNCCALL Handle = 0x80010004 + RPC_E_CANTCALLOUT_INEXTERNALCALL Handle = 0x80010005 + RPC_E_CONNECTION_TERMINATED Handle = 0x80010006 + RPC_E_SERVER_DIED Handle = 0x80010007 + RPC_E_CLIENT_DIED Handle = 0x80010008 + RPC_E_INVALID_DATAPACKET Handle = 0x80010009 + RPC_E_CANTTRANSMIT_CALL Handle = 0x8001000A + RPC_E_CLIENT_CANTMARSHAL_DATA Handle = 0x8001000B + RPC_E_CLIENT_CANTUNMARSHAL_DATA Handle = 0x8001000C + RPC_E_SERVER_CANTMARSHAL_DATA Handle = 0x8001000D + RPC_E_SERVER_CANTUNMARSHAL_DATA Handle = 0x8001000E + RPC_E_INVALID_DATA Handle = 0x8001000F + RPC_E_INVALID_PARAMETER Handle = 0x80010010 + RPC_E_CANTCALLOUT_AGAIN Handle = 0x80010011 + RPC_E_SERVER_DIED_DNE Handle = 0x80010012 + RPC_E_SYS_CALL_FAILED Handle = 0x80010100 + RPC_E_OUT_OF_RESOURCES Handle = 0x80010101 + RPC_E_ATTEMPTED_MULTITHREAD Handle = 0x80010102 + RPC_E_NOT_REGISTERED Handle = 0x80010103 + RPC_E_FAULT Handle = 0x80010104 + RPC_E_SERVERFAULT Handle = 0x80010105 + RPC_E_CHANGED_MODE Handle = 0x80010106 + RPC_E_INVALIDMETHOD Handle = 0x80010107 + RPC_E_DISCONNECTED Handle = 0x80010108 + RPC_E_RETRY Handle = 0x80010109 + RPC_E_SERVERCALL_RETRYLATER Handle = 0x8001010A + RPC_E_SERVERCALL_REJECTED Handle = 0x8001010B + RPC_E_INVALID_CALLDATA Handle = 0x8001010C + RPC_E_CANTCALLOUT_ININPUTSYNCCALL Handle = 0x8001010D + RPC_E_WRONG_THREAD Handle = 0x8001010E + RPC_E_THREAD_NOT_INIT Handle = 0x8001010F + RPC_E_VERSION_MISMATCH Handle = 0x80010110 + RPC_E_INVALID_HEADER Handle = 0x80010111 + RPC_E_INVALID_EXTENSION Handle = 0x80010112 + RPC_E_INVALID_IPID Handle = 0x80010113 + RPC_E_INVALID_OBJECT Handle = 0x80010114 + RPC_S_CALLPENDING Handle = 0x80010115 + RPC_S_WAITONTIMER Handle = 0x80010116 + RPC_E_CALL_COMPLETE Handle = 0x80010117 + RPC_E_UNSECURE_CALL Handle = 0x80010118 + RPC_E_TOO_LATE Handle = 0x80010119 + RPC_E_NO_GOOD_SECURITY_PACKAGES Handle = 0x8001011A + RPC_E_ACCESS_DENIED Handle = 0x8001011B + RPC_E_REMOTE_DISABLED Handle = 0x8001011C + RPC_E_INVALID_OBJREF Handle = 
0x8001011D + RPC_E_NO_CONTEXT Handle = 0x8001011E + RPC_E_TIMEOUT Handle = 0x8001011F + RPC_E_NO_SYNC Handle = 0x80010120 + RPC_E_FULLSIC_REQUIRED Handle = 0x80010121 + RPC_E_INVALID_STD_NAME Handle = 0x80010122 + CO_E_FAILEDTOIMPERSONATE Handle = 0x80010123 + CO_E_FAILEDTOGETSECCTX Handle = 0x80010124 + CO_E_FAILEDTOOPENTHREADTOKEN Handle = 0x80010125 + CO_E_FAILEDTOGETTOKENINFO Handle = 0x80010126 + CO_E_TRUSTEEDOESNTMATCHCLIENT Handle = 0x80010127 + CO_E_FAILEDTOQUERYCLIENTBLANKET Handle = 0x80010128 + CO_E_FAILEDTOSETDACL Handle = 0x80010129 + CO_E_ACCESSCHECKFAILED Handle = 0x8001012A + CO_E_NETACCESSAPIFAILED Handle = 0x8001012B + CO_E_WRONGTRUSTEENAMESYNTAX Handle = 0x8001012C + CO_E_INVALIDSID Handle = 0x8001012D + CO_E_CONVERSIONFAILED Handle = 0x8001012E + CO_E_NOMATCHINGSIDFOUND Handle = 0x8001012F + CO_E_LOOKUPACCSIDFAILED Handle = 0x80010130 + CO_E_NOMATCHINGNAMEFOUND Handle = 0x80010131 + CO_E_LOOKUPACCNAMEFAILED Handle = 0x80010132 + CO_E_SETSERLHNDLFAILED Handle = 0x80010133 + CO_E_FAILEDTOGETWINDIR Handle = 0x80010134 + CO_E_PATHTOOLONG Handle = 0x80010135 + CO_E_FAILEDTOGENUUID Handle = 0x80010136 + CO_E_FAILEDTOCREATEFILE Handle = 0x80010137 + CO_E_FAILEDTOCLOSEHANDLE Handle = 0x80010138 + CO_E_EXCEEDSYSACLLIMIT Handle = 0x80010139 + CO_E_ACESINWRONGORDER Handle = 0x8001013A + CO_E_INCOMPATIBLESTREAMVERSION Handle = 0x8001013B + CO_E_FAILEDTOOPENPROCESSTOKEN Handle = 0x8001013C + CO_E_DECODEFAILED Handle = 0x8001013D + CO_E_ACNOTINITIALIZED Handle = 0x8001013F + CO_E_CANCEL_DISABLED Handle = 0x80010140 + RPC_E_UNEXPECTED Handle = 0x8001FFFF + ERROR_AUDITING_DISABLED Handle = 0xC0090001 + ERROR_ALL_SIDS_FILTERED Handle = 0xC0090002 + ERROR_BIZRULES_NOT_ENABLED Handle = 0xC0090003 + NTE_BAD_UID Handle = 0x80090001 + NTE_BAD_HASH Handle = 0x80090002 + NTE_BAD_KEY Handle = 0x80090003 + NTE_BAD_LEN Handle = 0x80090004 + NTE_BAD_DATA Handle = 0x80090005 + NTE_BAD_SIGNATURE Handle = 0x80090006 + NTE_BAD_VER Handle = 0x80090007 + NTE_BAD_ALGID Handle = 0x80090008 + NTE_BAD_FLAGS Handle = 0x80090009 + NTE_BAD_TYPE Handle = 0x8009000A + NTE_BAD_KEY_STATE Handle = 0x8009000B + NTE_BAD_HASH_STATE Handle = 0x8009000C + NTE_NO_KEY Handle = 0x8009000D + NTE_NO_MEMORY Handle = 0x8009000E + NTE_EXISTS Handle = 0x8009000F + NTE_PERM Handle = 0x80090010 + NTE_NOT_FOUND Handle = 0x80090011 + NTE_DOUBLE_ENCRYPT Handle = 0x80090012 + NTE_BAD_PROVIDER Handle = 0x80090013 + NTE_BAD_PROV_TYPE Handle = 0x80090014 + NTE_BAD_PUBLIC_KEY Handle = 0x80090015 + NTE_BAD_KEYSET Handle = 0x80090016 + NTE_PROV_TYPE_NOT_DEF Handle = 0x80090017 + NTE_PROV_TYPE_ENTRY_BAD Handle = 0x80090018 + NTE_KEYSET_NOT_DEF Handle = 0x80090019 + NTE_KEYSET_ENTRY_BAD Handle = 0x8009001A + NTE_PROV_TYPE_NO_MATCH Handle = 0x8009001B + NTE_SIGNATURE_FILE_BAD Handle = 0x8009001C + NTE_PROVIDER_DLL_FAIL Handle = 0x8009001D + NTE_PROV_DLL_NOT_FOUND Handle = 0x8009001E + NTE_BAD_KEYSET_PARAM Handle = 0x8009001F + NTE_FAIL Handle = 0x80090020 + NTE_SYS_ERR Handle = 0x80090021 + NTE_SILENT_CONTEXT Handle = 0x80090022 + NTE_TOKEN_KEYSET_STORAGE_FULL Handle = 0x80090023 + NTE_TEMPORARY_PROFILE Handle = 0x80090024 + NTE_FIXEDPARAMETER Handle = 0x80090025 + NTE_INVALID_HANDLE Handle = 0x80090026 + NTE_INVALID_PARAMETER Handle = 0x80090027 + NTE_BUFFER_TOO_SMALL Handle = 0x80090028 + NTE_NOT_SUPPORTED Handle = 0x80090029 + NTE_NO_MORE_ITEMS Handle = 0x8009002A + NTE_BUFFERS_OVERLAP Handle = 0x8009002B + NTE_DECRYPTION_FAILURE Handle = 0x8009002C + NTE_INTERNAL_ERROR Handle = 0x8009002D + NTE_UI_REQUIRED Handle = 0x8009002E + 
NTE_HMAC_NOT_SUPPORTED Handle = 0x8009002F + NTE_DEVICE_NOT_READY Handle = 0x80090030 + NTE_AUTHENTICATION_IGNORED Handle = 0x80090031 + NTE_VALIDATION_FAILED Handle = 0x80090032 + NTE_INCORRECT_PASSWORD Handle = 0x80090033 + NTE_ENCRYPTION_FAILURE Handle = 0x80090034 + NTE_DEVICE_NOT_FOUND Handle = 0x80090035 + NTE_USER_CANCELLED Handle = 0x80090036 + NTE_PASSWORD_CHANGE_REQUIRED Handle = 0x80090037 + NTE_NOT_ACTIVE_CONSOLE Handle = 0x80090038 + SEC_E_INSUFFICIENT_MEMORY Handle = 0x80090300 + SEC_E_INVALID_HANDLE Handle = 0x80090301 + SEC_E_UNSUPPORTED_FUNCTION Handle = 0x80090302 + SEC_E_TARGET_UNKNOWN Handle = 0x80090303 + SEC_E_INTERNAL_ERROR Handle = 0x80090304 + SEC_E_SECPKG_NOT_FOUND Handle = 0x80090305 + SEC_E_NOT_OWNER Handle = 0x80090306 + SEC_E_CANNOT_INSTALL Handle = 0x80090307 + SEC_E_INVALID_TOKEN Handle = 0x80090308 + SEC_E_CANNOT_PACK Handle = 0x80090309 + SEC_E_QOP_NOT_SUPPORTED Handle = 0x8009030A + SEC_E_NO_IMPERSONATION Handle = 0x8009030B + SEC_E_LOGON_DENIED Handle = 0x8009030C + SEC_E_UNKNOWN_CREDENTIALS Handle = 0x8009030D + SEC_E_NO_CREDENTIALS Handle = 0x8009030E + SEC_E_MESSAGE_ALTERED Handle = 0x8009030F + SEC_E_OUT_OF_SEQUENCE Handle = 0x80090310 + SEC_E_NO_AUTHENTICATING_AUTHORITY Handle = 0x80090311 + SEC_I_CONTINUE_NEEDED Handle = 0x00090312 + SEC_I_COMPLETE_NEEDED Handle = 0x00090313 + SEC_I_COMPLETE_AND_CONTINUE Handle = 0x00090314 + SEC_I_LOCAL_LOGON Handle = 0x00090315 + SEC_E_BAD_PKGID Handle = 0x80090316 + SEC_E_CONTEXT_EXPIRED Handle = 0x80090317 + SEC_I_CONTEXT_EXPIRED Handle = 0x00090317 + SEC_E_INCOMPLETE_MESSAGE Handle = 0x80090318 + SEC_E_INCOMPLETE_CREDENTIALS Handle = 0x80090320 + SEC_E_BUFFER_TOO_SMALL Handle = 0x80090321 + SEC_I_INCOMPLETE_CREDENTIALS Handle = 0x00090320 + SEC_I_RENEGOTIATE Handle = 0x00090321 + SEC_E_WRONG_PRINCIPAL Handle = 0x80090322 + SEC_I_NO_LSA_CONTEXT Handle = 0x00090323 + SEC_E_TIME_SKEW Handle = 0x80090324 + SEC_E_UNTRUSTED_ROOT Handle = 0x80090325 + SEC_E_ILLEGAL_MESSAGE Handle = 0x80090326 + SEC_E_CERT_UNKNOWN Handle = 0x80090327 + SEC_E_CERT_EXPIRED Handle = 0x80090328 + SEC_E_ENCRYPT_FAILURE Handle = 0x80090329 + SEC_E_DECRYPT_FAILURE Handle = 0x80090330 + SEC_E_ALGORITHM_MISMATCH Handle = 0x80090331 + SEC_E_SECURITY_QOS_FAILED Handle = 0x80090332 + SEC_E_UNFINISHED_CONTEXT_DELETED Handle = 0x80090333 + SEC_E_NO_TGT_REPLY Handle = 0x80090334 + SEC_E_NO_IP_ADDRESSES Handle = 0x80090335 + SEC_E_WRONG_CREDENTIAL_HANDLE Handle = 0x80090336 + SEC_E_CRYPTO_SYSTEM_INVALID Handle = 0x80090337 + SEC_E_MAX_REFERRALS_EXCEEDED Handle = 0x80090338 + SEC_E_MUST_BE_KDC Handle = 0x80090339 + SEC_E_STRONG_CRYPTO_NOT_SUPPORTED Handle = 0x8009033A + SEC_E_TOO_MANY_PRINCIPALS Handle = 0x8009033B + SEC_E_NO_PA_DATA Handle = 0x8009033C + SEC_E_PKINIT_NAME_MISMATCH Handle = 0x8009033D + SEC_E_SMARTCARD_LOGON_REQUIRED Handle = 0x8009033E + SEC_E_SHUTDOWN_IN_PROGRESS Handle = 0x8009033F + SEC_E_KDC_INVALID_REQUEST Handle = 0x80090340 + SEC_E_KDC_UNABLE_TO_REFER Handle = 0x80090341 + SEC_E_KDC_UNKNOWN_ETYPE Handle = 0x80090342 + SEC_E_UNSUPPORTED_PREAUTH Handle = 0x80090343 + SEC_E_DELEGATION_REQUIRED Handle = 0x80090345 + SEC_E_BAD_BINDINGS Handle = 0x80090346 + SEC_E_MULTIPLE_ACCOUNTS Handle = 0x80090347 + SEC_E_NO_KERB_KEY Handle = 0x80090348 + SEC_E_CERT_WRONG_USAGE Handle = 0x80090349 + SEC_E_DOWNGRADE_DETECTED Handle = 0x80090350 + SEC_E_SMARTCARD_CERT_REVOKED Handle = 0x80090351 + SEC_E_ISSUING_CA_UNTRUSTED Handle = 0x80090352 + SEC_E_REVOCATION_OFFLINE_C Handle = 0x80090353 + SEC_E_PKINIT_CLIENT_FAILURE Handle = 0x80090354 + 
SEC_E_SMARTCARD_CERT_EXPIRED Handle = 0x80090355 + SEC_E_NO_S4U_PROT_SUPPORT Handle = 0x80090356 + SEC_E_CROSSREALM_DELEGATION_FAILURE Handle = 0x80090357 + SEC_E_REVOCATION_OFFLINE_KDC Handle = 0x80090358 + SEC_E_ISSUING_CA_UNTRUSTED_KDC Handle = 0x80090359 + SEC_E_KDC_CERT_EXPIRED Handle = 0x8009035A + SEC_E_KDC_CERT_REVOKED Handle = 0x8009035B + SEC_I_SIGNATURE_NEEDED Handle = 0x0009035C + SEC_E_INVALID_PARAMETER Handle = 0x8009035D + SEC_E_DELEGATION_POLICY Handle = 0x8009035E + SEC_E_POLICY_NLTM_ONLY Handle = 0x8009035F + SEC_I_NO_RENEGOTIATION Handle = 0x00090360 + SEC_E_NO_CONTEXT Handle = 0x80090361 + SEC_E_PKU2U_CERT_FAILURE Handle = 0x80090362 + SEC_E_MUTUAL_AUTH_FAILED Handle = 0x80090363 + SEC_I_MESSAGE_FRAGMENT Handle = 0x00090364 + SEC_E_ONLY_HTTPS_ALLOWED Handle = 0x80090365 + SEC_I_CONTINUE_NEEDED_MESSAGE_OK Handle = 0x00090366 + SEC_E_APPLICATION_PROTOCOL_MISMATCH Handle = 0x80090367 + SEC_I_ASYNC_CALL_PENDING Handle = 0x00090368 + SEC_E_INVALID_UPN_NAME Handle = 0x80090369 + SEC_E_NO_SPM = SEC_E_INTERNAL_ERROR + SEC_E_NOT_SUPPORTED = SEC_E_UNSUPPORTED_FUNCTION + CRYPT_E_MSG_ERROR Handle = 0x80091001 + CRYPT_E_UNKNOWN_ALGO Handle = 0x80091002 + CRYPT_E_OID_FORMAT Handle = 0x80091003 + CRYPT_E_INVALID_MSG_TYPE Handle = 0x80091004 + CRYPT_E_UNEXPECTED_ENCODING Handle = 0x80091005 + CRYPT_E_AUTH_ATTR_MISSING Handle = 0x80091006 + CRYPT_E_HASH_VALUE Handle = 0x80091007 + CRYPT_E_INVALID_INDEX Handle = 0x80091008 + CRYPT_E_ALREADY_DECRYPTED Handle = 0x80091009 + CRYPT_E_NOT_DECRYPTED Handle = 0x8009100A + CRYPT_E_RECIPIENT_NOT_FOUND Handle = 0x8009100B + CRYPT_E_CONTROL_TYPE Handle = 0x8009100C + CRYPT_E_ISSUER_SERIALNUMBER Handle = 0x8009100D + CRYPT_E_SIGNER_NOT_FOUND Handle = 0x8009100E + CRYPT_E_ATTRIBUTES_MISSING Handle = 0x8009100F + CRYPT_E_STREAM_MSG_NOT_READY Handle = 0x80091010 + CRYPT_E_STREAM_INSUFFICIENT_DATA Handle = 0x80091011 + CRYPT_I_NEW_PROTECTION_REQUIRED Handle = 0x00091012 + CRYPT_E_BAD_LEN Handle = 0x80092001 + CRYPT_E_BAD_ENCODE Handle = 0x80092002 + CRYPT_E_FILE_ERROR Handle = 0x80092003 + CRYPT_E_NOT_FOUND Handle = 0x80092004 + CRYPT_E_EXISTS Handle = 0x80092005 + CRYPT_E_NO_PROVIDER Handle = 0x80092006 + CRYPT_E_SELF_SIGNED Handle = 0x80092007 + CRYPT_E_DELETED_PREV Handle = 0x80092008 + CRYPT_E_NO_MATCH Handle = 0x80092009 + CRYPT_E_UNEXPECTED_MSG_TYPE Handle = 0x8009200A + CRYPT_E_NO_KEY_PROPERTY Handle = 0x8009200B + CRYPT_E_NO_DECRYPT_CERT Handle = 0x8009200C + CRYPT_E_BAD_MSG Handle = 0x8009200D + CRYPT_E_NO_SIGNER Handle = 0x8009200E + CRYPT_E_PENDING_CLOSE Handle = 0x8009200F + CRYPT_E_REVOKED Handle = 0x80092010 + CRYPT_E_NO_REVOCATION_DLL Handle = 0x80092011 + CRYPT_E_NO_REVOCATION_CHECK Handle = 0x80092012 + CRYPT_E_REVOCATION_OFFLINE Handle = 0x80092013 + CRYPT_E_NOT_IN_REVOCATION_DATABASE Handle = 0x80092014 + CRYPT_E_INVALID_NUMERIC_STRING Handle = 0x80092020 + CRYPT_E_INVALID_PRINTABLE_STRING Handle = 0x80092021 + CRYPT_E_INVALID_IA5_STRING Handle = 0x80092022 + CRYPT_E_INVALID_X500_STRING Handle = 0x80092023 + CRYPT_E_NOT_CHAR_STRING Handle = 0x80092024 + CRYPT_E_FILERESIZED Handle = 0x80092025 + CRYPT_E_SECURITY_SETTINGS Handle = 0x80092026 + CRYPT_E_NO_VERIFY_USAGE_DLL Handle = 0x80092027 + CRYPT_E_NO_VERIFY_USAGE_CHECK Handle = 0x80092028 + CRYPT_E_VERIFY_USAGE_OFFLINE Handle = 0x80092029 + CRYPT_E_NOT_IN_CTL Handle = 0x8009202A + CRYPT_E_NO_TRUSTED_SIGNER Handle = 0x8009202B + CRYPT_E_MISSING_PUBKEY_PARA Handle = 0x8009202C + CRYPT_E_OBJECT_LOCATOR_OBJECT_NOT_FOUND Handle = 0x8009202D + CRYPT_E_OSS_ERROR Handle = 0x80093000 + 
OSS_MORE_BUF Handle = 0x80093001 + OSS_NEGATIVE_UINTEGER Handle = 0x80093002 + OSS_PDU_RANGE Handle = 0x80093003 + OSS_MORE_INPUT Handle = 0x80093004 + OSS_DATA_ERROR Handle = 0x80093005 + OSS_BAD_ARG Handle = 0x80093006 + OSS_BAD_VERSION Handle = 0x80093007 + OSS_OUT_MEMORY Handle = 0x80093008 + OSS_PDU_MISMATCH Handle = 0x80093009 + OSS_LIMITED Handle = 0x8009300A + OSS_BAD_PTR Handle = 0x8009300B + OSS_BAD_TIME Handle = 0x8009300C + OSS_INDEFINITE_NOT_SUPPORTED Handle = 0x8009300D + OSS_MEM_ERROR Handle = 0x8009300E + OSS_BAD_TABLE Handle = 0x8009300F + OSS_TOO_LONG Handle = 0x80093010 + OSS_CONSTRAINT_VIOLATED Handle = 0x80093011 + OSS_FATAL_ERROR Handle = 0x80093012 + OSS_ACCESS_SERIALIZATION_ERROR Handle = 0x80093013 + OSS_NULL_TBL Handle = 0x80093014 + OSS_NULL_FCN Handle = 0x80093015 + OSS_BAD_ENCRULES Handle = 0x80093016 + OSS_UNAVAIL_ENCRULES Handle = 0x80093017 + OSS_CANT_OPEN_TRACE_WINDOW Handle = 0x80093018 + OSS_UNIMPLEMENTED Handle = 0x80093019 + OSS_OID_DLL_NOT_LINKED Handle = 0x8009301A + OSS_CANT_OPEN_TRACE_FILE Handle = 0x8009301B + OSS_TRACE_FILE_ALREADY_OPEN Handle = 0x8009301C + OSS_TABLE_MISMATCH Handle = 0x8009301D + OSS_TYPE_NOT_SUPPORTED Handle = 0x8009301E + OSS_REAL_DLL_NOT_LINKED Handle = 0x8009301F + OSS_REAL_CODE_NOT_LINKED Handle = 0x80093020 + OSS_OUT_OF_RANGE Handle = 0x80093021 + OSS_COPIER_DLL_NOT_LINKED Handle = 0x80093022 + OSS_CONSTRAINT_DLL_NOT_LINKED Handle = 0x80093023 + OSS_COMPARATOR_DLL_NOT_LINKED Handle = 0x80093024 + OSS_COMPARATOR_CODE_NOT_LINKED Handle = 0x80093025 + OSS_MEM_MGR_DLL_NOT_LINKED Handle = 0x80093026 + OSS_PDV_DLL_NOT_LINKED Handle = 0x80093027 + OSS_PDV_CODE_NOT_LINKED Handle = 0x80093028 + OSS_API_DLL_NOT_LINKED Handle = 0x80093029 + OSS_BERDER_DLL_NOT_LINKED Handle = 0x8009302A + OSS_PER_DLL_NOT_LINKED Handle = 0x8009302B + OSS_OPEN_TYPE_ERROR Handle = 0x8009302C + OSS_MUTEX_NOT_CREATED Handle = 0x8009302D + OSS_CANT_CLOSE_TRACE_FILE Handle = 0x8009302E + CRYPT_E_ASN1_ERROR Handle = 0x80093100 + CRYPT_E_ASN1_INTERNAL Handle = 0x80093101 + CRYPT_E_ASN1_EOD Handle = 0x80093102 + CRYPT_E_ASN1_CORRUPT Handle = 0x80093103 + CRYPT_E_ASN1_LARGE Handle = 0x80093104 + CRYPT_E_ASN1_CONSTRAINT Handle = 0x80093105 + CRYPT_E_ASN1_MEMORY Handle = 0x80093106 + CRYPT_E_ASN1_OVERFLOW Handle = 0x80093107 + CRYPT_E_ASN1_BADPDU Handle = 0x80093108 + CRYPT_E_ASN1_BADARGS Handle = 0x80093109 + CRYPT_E_ASN1_BADREAL Handle = 0x8009310A + CRYPT_E_ASN1_BADTAG Handle = 0x8009310B + CRYPT_E_ASN1_CHOICE Handle = 0x8009310C + CRYPT_E_ASN1_RULE Handle = 0x8009310D + CRYPT_E_ASN1_UTF8 Handle = 0x8009310E + CRYPT_E_ASN1_PDU_TYPE Handle = 0x80093133 + CRYPT_E_ASN1_NYI Handle = 0x80093134 + CRYPT_E_ASN1_EXTENDED Handle = 0x80093201 + CRYPT_E_ASN1_NOEOD Handle = 0x80093202 + CERTSRV_E_BAD_REQUESTSUBJECT Handle = 0x80094001 + CERTSRV_E_NO_REQUEST Handle = 0x80094002 + CERTSRV_E_BAD_REQUESTSTATUS Handle = 0x80094003 + CERTSRV_E_PROPERTY_EMPTY Handle = 0x80094004 + CERTSRV_E_INVALID_CA_CERTIFICATE Handle = 0x80094005 + CERTSRV_E_SERVER_SUSPENDED Handle = 0x80094006 + CERTSRV_E_ENCODING_LENGTH Handle = 0x80094007 + CERTSRV_E_ROLECONFLICT Handle = 0x80094008 + CERTSRV_E_RESTRICTEDOFFICER Handle = 0x80094009 + CERTSRV_E_KEY_ARCHIVAL_NOT_CONFIGURED Handle = 0x8009400A + CERTSRV_E_NO_VALID_KRA Handle = 0x8009400B + CERTSRV_E_BAD_REQUEST_KEY_ARCHIVAL Handle = 0x8009400C + CERTSRV_E_NO_CAADMIN_DEFINED Handle = 0x8009400D + CERTSRV_E_BAD_RENEWAL_CERT_ATTRIBUTE Handle = 0x8009400E + CERTSRV_E_NO_DB_SESSIONS Handle = 0x8009400F + CERTSRV_E_ALIGNMENT_FAULT Handle = 
0x80094010 + CERTSRV_E_ENROLL_DENIED Handle = 0x80094011 + CERTSRV_E_TEMPLATE_DENIED Handle = 0x80094012 + CERTSRV_E_DOWNLEVEL_DC_SSL_OR_UPGRADE Handle = 0x80094013 + CERTSRV_E_ADMIN_DENIED_REQUEST Handle = 0x80094014 + CERTSRV_E_NO_POLICY_SERVER Handle = 0x80094015 + CERTSRV_E_WEAK_SIGNATURE_OR_KEY Handle = 0x80094016 + CERTSRV_E_KEY_ATTESTATION_NOT_SUPPORTED Handle = 0x80094017 + CERTSRV_E_ENCRYPTION_CERT_REQUIRED Handle = 0x80094018 + CERTSRV_E_UNSUPPORTED_CERT_TYPE Handle = 0x80094800 + CERTSRV_E_NO_CERT_TYPE Handle = 0x80094801 + CERTSRV_E_TEMPLATE_CONFLICT Handle = 0x80094802 + CERTSRV_E_SUBJECT_ALT_NAME_REQUIRED Handle = 0x80094803 + CERTSRV_E_ARCHIVED_KEY_REQUIRED Handle = 0x80094804 + CERTSRV_E_SMIME_REQUIRED Handle = 0x80094805 + CERTSRV_E_BAD_RENEWAL_SUBJECT Handle = 0x80094806 + CERTSRV_E_BAD_TEMPLATE_VERSION Handle = 0x80094807 + CERTSRV_E_TEMPLATE_POLICY_REQUIRED Handle = 0x80094808 + CERTSRV_E_SIGNATURE_POLICY_REQUIRED Handle = 0x80094809 + CERTSRV_E_SIGNATURE_COUNT Handle = 0x8009480A + CERTSRV_E_SIGNATURE_REJECTED Handle = 0x8009480B + CERTSRV_E_ISSUANCE_POLICY_REQUIRED Handle = 0x8009480C + CERTSRV_E_SUBJECT_UPN_REQUIRED Handle = 0x8009480D + CERTSRV_E_SUBJECT_DIRECTORY_GUID_REQUIRED Handle = 0x8009480E + CERTSRV_E_SUBJECT_DNS_REQUIRED Handle = 0x8009480F + CERTSRV_E_ARCHIVED_KEY_UNEXPECTED Handle = 0x80094810 + CERTSRV_E_KEY_LENGTH Handle = 0x80094811 + CERTSRV_E_SUBJECT_EMAIL_REQUIRED Handle = 0x80094812 + CERTSRV_E_UNKNOWN_CERT_TYPE Handle = 0x80094813 + CERTSRV_E_CERT_TYPE_OVERLAP Handle = 0x80094814 + CERTSRV_E_TOO_MANY_SIGNATURES Handle = 0x80094815 + CERTSRV_E_RENEWAL_BAD_PUBLIC_KEY Handle = 0x80094816 + CERTSRV_E_INVALID_EK Handle = 0x80094817 + CERTSRV_E_INVALID_IDBINDING Handle = 0x80094818 + CERTSRV_E_INVALID_ATTESTATION Handle = 0x80094819 + CERTSRV_E_KEY_ATTESTATION Handle = 0x8009481A + CERTSRV_E_CORRUPT_KEY_ATTESTATION Handle = 0x8009481B + CERTSRV_E_EXPIRED_CHALLENGE Handle = 0x8009481C + CERTSRV_E_INVALID_RESPONSE Handle = 0x8009481D + CERTSRV_E_INVALID_REQUESTID Handle = 0x8009481E + CERTSRV_E_REQUEST_PRECERTIFICATE_MISMATCH Handle = 0x8009481F + CERTSRV_E_PENDING_CLIENT_RESPONSE Handle = 0x80094820 + XENROLL_E_KEY_NOT_EXPORTABLE Handle = 0x80095000 + XENROLL_E_CANNOT_ADD_ROOT_CERT Handle = 0x80095001 + XENROLL_E_RESPONSE_KA_HASH_NOT_FOUND Handle = 0x80095002 + XENROLL_E_RESPONSE_UNEXPECTED_KA_HASH Handle = 0x80095003 + XENROLL_E_RESPONSE_KA_HASH_MISMATCH Handle = 0x80095004 + XENROLL_E_KEYSPEC_SMIME_MISMATCH Handle = 0x80095005 + TRUST_E_SYSTEM_ERROR Handle = 0x80096001 + TRUST_E_NO_SIGNER_CERT Handle = 0x80096002 + TRUST_E_COUNTER_SIGNER Handle = 0x80096003 + TRUST_E_CERT_SIGNATURE Handle = 0x80096004 + TRUST_E_TIME_STAMP Handle = 0x80096005 + TRUST_E_BAD_DIGEST Handle = 0x80096010 + TRUST_E_MALFORMED_SIGNATURE Handle = 0x80096011 + TRUST_E_BASIC_CONSTRAINTS Handle = 0x80096019 + TRUST_E_FINANCIAL_CRITERIA Handle = 0x8009601E + MSSIPOTF_E_OUTOFMEMRANGE Handle = 0x80097001 + MSSIPOTF_E_CANTGETOBJECT Handle = 0x80097002 + MSSIPOTF_E_NOHEADTABLE Handle = 0x80097003 + MSSIPOTF_E_BAD_MAGICNUMBER Handle = 0x80097004 + MSSIPOTF_E_BAD_OFFSET_TABLE Handle = 0x80097005 + MSSIPOTF_E_TABLE_TAGORDER Handle = 0x80097006 + MSSIPOTF_E_TABLE_LONGWORD Handle = 0x80097007 + MSSIPOTF_E_BAD_FIRST_TABLE_PLACEMENT Handle = 0x80097008 + MSSIPOTF_E_TABLES_OVERLAP Handle = 0x80097009 + MSSIPOTF_E_TABLE_PADBYTES Handle = 0x8009700A + MSSIPOTF_E_FILETOOSMALL Handle = 0x8009700B + MSSIPOTF_E_TABLE_CHECKSUM Handle = 0x8009700C + MSSIPOTF_E_FILE_CHECKSUM Handle = 0x8009700D + 
MSSIPOTF_E_FAILED_POLICY Handle = 0x80097010 + MSSIPOTF_E_FAILED_HINTS_CHECK Handle = 0x80097011 + MSSIPOTF_E_NOT_OPENTYPE Handle = 0x80097012 + MSSIPOTF_E_FILE Handle = 0x80097013 + MSSIPOTF_E_CRYPT Handle = 0x80097014 + MSSIPOTF_E_BADVERSION Handle = 0x80097015 + MSSIPOTF_E_DSIG_STRUCTURE Handle = 0x80097016 + MSSIPOTF_E_PCONST_CHECK Handle = 0x80097017 + MSSIPOTF_E_STRUCTURE Handle = 0x80097018 + ERROR_CRED_REQUIRES_CONFIRMATION Handle = 0x80097019 + NTE_OP_OK syscall.Errno = 0 + TRUST_E_PROVIDER_UNKNOWN Handle = 0x800B0001 + TRUST_E_ACTION_UNKNOWN Handle = 0x800B0002 + TRUST_E_SUBJECT_FORM_UNKNOWN Handle = 0x800B0003 + TRUST_E_SUBJECT_NOT_TRUSTED Handle = 0x800B0004 + DIGSIG_E_ENCODE Handle = 0x800B0005 + DIGSIG_E_DECODE Handle = 0x800B0006 + DIGSIG_E_EXTENSIBILITY Handle = 0x800B0007 + DIGSIG_E_CRYPTO Handle = 0x800B0008 + PERSIST_E_SIZEDEFINITE Handle = 0x800B0009 + PERSIST_E_SIZEINDEFINITE Handle = 0x800B000A + PERSIST_E_NOTSELFSIZING Handle = 0x800B000B + TRUST_E_NOSIGNATURE Handle = 0x800B0100 + CERT_E_EXPIRED Handle = 0x800B0101 + CERT_E_VALIDITYPERIODNESTING Handle = 0x800B0102 + CERT_E_ROLE Handle = 0x800B0103 + CERT_E_PATHLENCONST Handle = 0x800B0104 + CERT_E_CRITICAL Handle = 0x800B0105 + CERT_E_PURPOSE Handle = 0x800B0106 + CERT_E_ISSUERCHAINING Handle = 0x800B0107 + CERT_E_MALFORMED Handle = 0x800B0108 + CERT_E_UNTRUSTEDROOT Handle = 0x800B0109 + CERT_E_CHAINING Handle = 0x800B010A + TRUST_E_FAIL Handle = 0x800B010B + CERT_E_REVOKED Handle = 0x800B010C + CERT_E_UNTRUSTEDTESTROOT Handle = 0x800B010D + CERT_E_REVOCATION_FAILURE Handle = 0x800B010E + CERT_E_CN_NO_MATCH Handle = 0x800B010F + CERT_E_WRONG_USAGE Handle = 0x800B0110 + TRUST_E_EXPLICIT_DISTRUST Handle = 0x800B0111 + CERT_E_UNTRUSTEDCA Handle = 0x800B0112 + CERT_E_INVALID_POLICY Handle = 0x800B0113 + CERT_E_INVALID_NAME Handle = 0x800B0114 + SPAPI_E_EXPECTED_SECTION_NAME Handle = 0x800F0000 + SPAPI_E_BAD_SECTION_NAME_LINE Handle = 0x800F0001 + SPAPI_E_SECTION_NAME_TOO_LONG Handle = 0x800F0002 + SPAPI_E_GENERAL_SYNTAX Handle = 0x800F0003 + SPAPI_E_WRONG_INF_STYLE Handle = 0x800F0100 + SPAPI_E_SECTION_NOT_FOUND Handle = 0x800F0101 + SPAPI_E_LINE_NOT_FOUND Handle = 0x800F0102 + SPAPI_E_NO_BACKUP Handle = 0x800F0103 + SPAPI_E_NO_ASSOCIATED_CLASS Handle = 0x800F0200 + SPAPI_E_CLASS_MISMATCH Handle = 0x800F0201 + SPAPI_E_DUPLICATE_FOUND Handle = 0x800F0202 + SPAPI_E_NO_DRIVER_SELECTED Handle = 0x800F0203 + SPAPI_E_KEY_DOES_NOT_EXIST Handle = 0x800F0204 + SPAPI_E_INVALID_DEVINST_NAME Handle = 0x800F0205 + SPAPI_E_INVALID_CLASS Handle = 0x800F0206 + SPAPI_E_DEVINST_ALREADY_EXISTS Handle = 0x800F0207 + SPAPI_E_DEVINFO_NOT_REGISTERED Handle = 0x800F0208 + SPAPI_E_INVALID_REG_PROPERTY Handle = 0x800F0209 + SPAPI_E_NO_INF Handle = 0x800F020A + SPAPI_E_NO_SUCH_DEVINST Handle = 0x800F020B + SPAPI_E_CANT_LOAD_CLASS_ICON Handle = 0x800F020C + SPAPI_E_INVALID_CLASS_INSTALLER Handle = 0x800F020D + SPAPI_E_DI_DO_DEFAULT Handle = 0x800F020E + SPAPI_E_DI_NOFILECOPY Handle = 0x800F020F + SPAPI_E_INVALID_HWPROFILE Handle = 0x800F0210 + SPAPI_E_NO_DEVICE_SELECTED Handle = 0x800F0211 + SPAPI_E_DEVINFO_LIST_LOCKED Handle = 0x800F0212 + SPAPI_E_DEVINFO_DATA_LOCKED Handle = 0x800F0213 + SPAPI_E_DI_BAD_PATH Handle = 0x800F0214 + SPAPI_E_NO_CLASSINSTALL_PARAMS Handle = 0x800F0215 + SPAPI_E_FILEQUEUE_LOCKED Handle = 0x800F0216 + SPAPI_E_BAD_SERVICE_INSTALLSECT Handle = 0x800F0217 + SPAPI_E_NO_CLASS_DRIVER_LIST Handle = 0x800F0218 + SPAPI_E_NO_ASSOCIATED_SERVICE Handle = 0x800F0219 + SPAPI_E_NO_DEFAULT_DEVICE_INTERFACE Handle = 0x800F021A + 
SPAPI_E_DEVICE_INTERFACE_ACTIVE Handle = 0x800F021B + SPAPI_E_DEVICE_INTERFACE_REMOVED Handle = 0x800F021C + SPAPI_E_BAD_INTERFACE_INSTALLSECT Handle = 0x800F021D + SPAPI_E_NO_SUCH_INTERFACE_CLASS Handle = 0x800F021E + SPAPI_E_INVALID_REFERENCE_STRING Handle = 0x800F021F + SPAPI_E_INVALID_MACHINENAME Handle = 0x800F0220 + SPAPI_E_REMOTE_COMM_FAILURE Handle = 0x800F0221 + SPAPI_E_MACHINE_UNAVAILABLE Handle = 0x800F0222 + SPAPI_E_NO_CONFIGMGR_SERVICES Handle = 0x800F0223 + SPAPI_E_INVALID_PROPPAGE_PROVIDER Handle = 0x800F0224 + SPAPI_E_NO_SUCH_DEVICE_INTERFACE Handle = 0x800F0225 + SPAPI_E_DI_POSTPROCESSING_REQUIRED Handle = 0x800F0226 + SPAPI_E_INVALID_COINSTALLER Handle = 0x800F0227 + SPAPI_E_NO_COMPAT_DRIVERS Handle = 0x800F0228 + SPAPI_E_NO_DEVICE_ICON Handle = 0x800F0229 + SPAPI_E_INVALID_INF_LOGCONFIG Handle = 0x800F022A + SPAPI_E_DI_DONT_INSTALL Handle = 0x800F022B + SPAPI_E_INVALID_FILTER_DRIVER Handle = 0x800F022C + SPAPI_E_NON_WINDOWS_NT_DRIVER Handle = 0x800F022D + SPAPI_E_NON_WINDOWS_DRIVER Handle = 0x800F022E + SPAPI_E_NO_CATALOG_FOR_OEM_INF Handle = 0x800F022F + SPAPI_E_DEVINSTALL_QUEUE_NONNATIVE Handle = 0x800F0230 + SPAPI_E_NOT_DISABLEABLE Handle = 0x800F0231 + SPAPI_E_CANT_REMOVE_DEVINST Handle = 0x800F0232 + SPAPI_E_INVALID_TARGET Handle = 0x800F0233 + SPAPI_E_DRIVER_NONNATIVE Handle = 0x800F0234 + SPAPI_E_IN_WOW64 Handle = 0x800F0235 + SPAPI_E_SET_SYSTEM_RESTORE_POINT Handle = 0x800F0236 + SPAPI_E_INCORRECTLY_COPIED_INF Handle = 0x800F0237 + SPAPI_E_SCE_DISABLED Handle = 0x800F0238 + SPAPI_E_UNKNOWN_EXCEPTION Handle = 0x800F0239 + SPAPI_E_PNP_REGISTRY_ERROR Handle = 0x800F023A + SPAPI_E_REMOTE_REQUEST_UNSUPPORTED Handle = 0x800F023B + SPAPI_E_NOT_AN_INSTALLED_OEM_INF Handle = 0x800F023C + SPAPI_E_INF_IN_USE_BY_DEVICES Handle = 0x800F023D + SPAPI_E_DI_FUNCTION_OBSOLETE Handle = 0x800F023E + SPAPI_E_NO_AUTHENTICODE_CATALOG Handle = 0x800F023F + SPAPI_E_AUTHENTICODE_DISALLOWED Handle = 0x800F0240 + SPAPI_E_AUTHENTICODE_TRUSTED_PUBLISHER Handle = 0x800F0241 + SPAPI_E_AUTHENTICODE_TRUST_NOT_ESTABLISHED Handle = 0x800F0242 + SPAPI_E_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Handle = 0x800F0243 + SPAPI_E_SIGNATURE_OSATTRIBUTE_MISMATCH Handle = 0x800F0244 + SPAPI_E_ONLY_VALIDATE_VIA_AUTHENTICODE Handle = 0x800F0245 + SPAPI_E_DEVICE_INSTALLER_NOT_READY Handle = 0x800F0246 + SPAPI_E_DRIVER_STORE_ADD_FAILED Handle = 0x800F0247 + SPAPI_E_DEVICE_INSTALL_BLOCKED Handle = 0x800F0248 + SPAPI_E_DRIVER_INSTALL_BLOCKED Handle = 0x800F0249 + SPAPI_E_WRONG_INF_TYPE Handle = 0x800F024A + SPAPI_E_FILE_HASH_NOT_IN_CATALOG Handle = 0x800F024B + SPAPI_E_DRIVER_STORE_DELETE_FAILED Handle = 0x800F024C + SPAPI_E_UNRECOVERABLE_STACK_OVERFLOW Handle = 0x800F0300 + SPAPI_E_ERROR_NOT_INSTALLED Handle = 0x800F1000 + SCARD_S_SUCCESS = S_OK + SCARD_F_INTERNAL_ERROR Handle = 0x80100001 + SCARD_E_CANCELLED Handle = 0x80100002 + SCARD_E_INVALID_HANDLE Handle = 0x80100003 + SCARD_E_INVALID_PARAMETER Handle = 0x80100004 + SCARD_E_INVALID_TARGET Handle = 0x80100005 + SCARD_E_NO_MEMORY Handle = 0x80100006 + SCARD_F_WAITED_TOO_LONG Handle = 0x80100007 + SCARD_E_INSUFFICIENT_BUFFER Handle = 0x80100008 + SCARD_E_UNKNOWN_READER Handle = 0x80100009 + SCARD_E_TIMEOUT Handle = 0x8010000A + SCARD_E_SHARING_VIOLATION Handle = 0x8010000B + SCARD_E_NO_SMARTCARD Handle = 0x8010000C + SCARD_E_UNKNOWN_CARD Handle = 0x8010000D + SCARD_E_CANT_DISPOSE Handle = 0x8010000E + SCARD_E_PROTO_MISMATCH Handle = 0x8010000F + SCARD_E_NOT_READY Handle = 0x80100010 + SCARD_E_INVALID_VALUE Handle = 0x80100011 + SCARD_E_SYSTEM_CANCELLED Handle = 
0x80100012 + SCARD_F_COMM_ERROR Handle = 0x80100013 + SCARD_F_UNKNOWN_ERROR Handle = 0x80100014 + SCARD_E_INVALID_ATR Handle = 0x80100015 + SCARD_E_NOT_TRANSACTED Handle = 0x80100016 + SCARD_E_READER_UNAVAILABLE Handle = 0x80100017 + SCARD_P_SHUTDOWN Handle = 0x80100018 + SCARD_E_PCI_TOO_SMALL Handle = 0x80100019 + SCARD_E_READER_UNSUPPORTED Handle = 0x8010001A + SCARD_E_DUPLICATE_READER Handle = 0x8010001B + SCARD_E_CARD_UNSUPPORTED Handle = 0x8010001C + SCARD_E_NO_SERVICE Handle = 0x8010001D + SCARD_E_SERVICE_STOPPED Handle = 0x8010001E + SCARD_E_UNEXPECTED Handle = 0x8010001F + SCARD_E_ICC_INSTALLATION Handle = 0x80100020 + SCARD_E_ICC_CREATEORDER Handle = 0x80100021 + SCARD_E_UNSUPPORTED_FEATURE Handle = 0x80100022 + SCARD_E_DIR_NOT_FOUND Handle = 0x80100023 + SCARD_E_FILE_NOT_FOUND Handle = 0x80100024 + SCARD_E_NO_DIR Handle = 0x80100025 + SCARD_E_NO_FILE Handle = 0x80100026 + SCARD_E_NO_ACCESS Handle = 0x80100027 + SCARD_E_WRITE_TOO_MANY Handle = 0x80100028 + SCARD_E_BAD_SEEK Handle = 0x80100029 + SCARD_E_INVALID_CHV Handle = 0x8010002A + SCARD_E_UNKNOWN_RES_MNG Handle = 0x8010002B + SCARD_E_NO_SUCH_CERTIFICATE Handle = 0x8010002C + SCARD_E_CERTIFICATE_UNAVAILABLE Handle = 0x8010002D + SCARD_E_NO_READERS_AVAILABLE Handle = 0x8010002E + SCARD_E_COMM_DATA_LOST Handle = 0x8010002F + SCARD_E_NO_KEY_CONTAINER Handle = 0x80100030 + SCARD_E_SERVER_TOO_BUSY Handle = 0x80100031 + SCARD_E_PIN_CACHE_EXPIRED Handle = 0x80100032 + SCARD_E_NO_PIN_CACHE Handle = 0x80100033 + SCARD_E_READ_ONLY_CARD Handle = 0x80100034 + SCARD_W_UNSUPPORTED_CARD Handle = 0x80100065 + SCARD_W_UNRESPONSIVE_CARD Handle = 0x80100066 + SCARD_W_UNPOWERED_CARD Handle = 0x80100067 + SCARD_W_RESET_CARD Handle = 0x80100068 + SCARD_W_REMOVED_CARD Handle = 0x80100069 + SCARD_W_SECURITY_VIOLATION Handle = 0x8010006A + SCARD_W_WRONG_CHV Handle = 0x8010006B + SCARD_W_CHV_BLOCKED Handle = 0x8010006C + SCARD_W_EOF Handle = 0x8010006D + SCARD_W_CANCELLED_BY_USER Handle = 0x8010006E + SCARD_W_CARD_NOT_AUTHENTICATED Handle = 0x8010006F + SCARD_W_CACHE_ITEM_NOT_FOUND Handle = 0x80100070 + SCARD_W_CACHE_ITEM_STALE Handle = 0x80100071 + SCARD_W_CACHE_ITEM_TOO_BIG Handle = 0x80100072 + COMADMIN_E_OBJECTERRORS Handle = 0x80110401 + COMADMIN_E_OBJECTINVALID Handle = 0x80110402 + COMADMIN_E_KEYMISSING Handle = 0x80110403 + COMADMIN_E_ALREADYINSTALLED Handle = 0x80110404 + COMADMIN_E_APP_FILE_WRITEFAIL Handle = 0x80110407 + COMADMIN_E_APP_FILE_READFAIL Handle = 0x80110408 + COMADMIN_E_APP_FILE_VERSION Handle = 0x80110409 + COMADMIN_E_BADPATH Handle = 0x8011040A + COMADMIN_E_APPLICATIONEXISTS Handle = 0x8011040B + COMADMIN_E_ROLEEXISTS Handle = 0x8011040C + COMADMIN_E_CANTCOPYFILE Handle = 0x8011040D + COMADMIN_E_NOUSER Handle = 0x8011040F + COMADMIN_E_INVALIDUSERIDS Handle = 0x80110410 + COMADMIN_E_NOREGISTRYCLSID Handle = 0x80110411 + COMADMIN_E_BADREGISTRYPROGID Handle = 0x80110412 + COMADMIN_E_AUTHENTICATIONLEVEL Handle = 0x80110413 + COMADMIN_E_USERPASSWDNOTVALID Handle = 0x80110414 + COMADMIN_E_CLSIDORIIDMISMATCH Handle = 0x80110418 + COMADMIN_E_REMOTEINTERFACE Handle = 0x80110419 + COMADMIN_E_DLLREGISTERSERVER Handle = 0x8011041A + COMADMIN_E_NOSERVERSHARE Handle = 0x8011041B + COMADMIN_E_DLLLOADFAILED Handle = 0x8011041D + COMADMIN_E_BADREGISTRYLIBID Handle = 0x8011041E + COMADMIN_E_APPDIRNOTFOUND Handle = 0x8011041F + COMADMIN_E_REGISTRARFAILED Handle = 0x80110423 + COMADMIN_E_COMPFILE_DOESNOTEXIST Handle = 0x80110424 + COMADMIN_E_COMPFILE_LOADDLLFAIL Handle = 0x80110425 + COMADMIN_E_COMPFILE_GETCLASSOBJ Handle = 0x80110426 + 
COMADMIN_E_COMPFILE_CLASSNOTAVAIL Handle = 0x80110427 + COMADMIN_E_COMPFILE_BADTLB Handle = 0x80110428 + COMADMIN_E_COMPFILE_NOTINSTALLABLE Handle = 0x80110429 + COMADMIN_E_NOTCHANGEABLE Handle = 0x8011042A + COMADMIN_E_NOTDELETEABLE Handle = 0x8011042B + COMADMIN_E_SESSION Handle = 0x8011042C + COMADMIN_E_COMP_MOVE_LOCKED Handle = 0x8011042D + COMADMIN_E_COMP_MOVE_BAD_DEST Handle = 0x8011042E + COMADMIN_E_REGISTERTLB Handle = 0x80110430 + COMADMIN_E_SYSTEMAPP Handle = 0x80110433 + COMADMIN_E_COMPFILE_NOREGISTRAR Handle = 0x80110434 + COMADMIN_E_COREQCOMPINSTALLED Handle = 0x80110435 + COMADMIN_E_SERVICENOTINSTALLED Handle = 0x80110436 + COMADMIN_E_PROPERTYSAVEFAILED Handle = 0x80110437 + COMADMIN_E_OBJECTEXISTS Handle = 0x80110438 + COMADMIN_E_COMPONENTEXISTS Handle = 0x80110439 + COMADMIN_E_REGFILE_CORRUPT Handle = 0x8011043B + COMADMIN_E_PROPERTY_OVERFLOW Handle = 0x8011043C + COMADMIN_E_NOTINREGISTRY Handle = 0x8011043E + COMADMIN_E_OBJECTNOTPOOLABLE Handle = 0x8011043F + COMADMIN_E_APPLID_MATCHES_CLSID Handle = 0x80110446 + COMADMIN_E_ROLE_DOES_NOT_EXIST Handle = 0x80110447 + COMADMIN_E_START_APP_NEEDS_COMPONENTS Handle = 0x80110448 + COMADMIN_E_REQUIRES_DIFFERENT_PLATFORM Handle = 0x80110449 + COMADMIN_E_CAN_NOT_EXPORT_APP_PROXY Handle = 0x8011044A + COMADMIN_E_CAN_NOT_START_APP Handle = 0x8011044B + COMADMIN_E_CAN_NOT_EXPORT_SYS_APP Handle = 0x8011044C + COMADMIN_E_CANT_SUBSCRIBE_TO_COMPONENT Handle = 0x8011044D + COMADMIN_E_EVENTCLASS_CANT_BE_SUBSCRIBER Handle = 0x8011044E + COMADMIN_E_LIB_APP_PROXY_INCOMPATIBLE Handle = 0x8011044F + COMADMIN_E_BASE_PARTITION_ONLY Handle = 0x80110450 + COMADMIN_E_START_APP_DISABLED Handle = 0x80110451 + COMADMIN_E_CAT_DUPLICATE_PARTITION_NAME Handle = 0x80110457 + COMADMIN_E_CAT_INVALID_PARTITION_NAME Handle = 0x80110458 + COMADMIN_E_CAT_PARTITION_IN_USE Handle = 0x80110459 + COMADMIN_E_FILE_PARTITION_DUPLICATE_FILES Handle = 0x8011045A + COMADMIN_E_CAT_IMPORTED_COMPONENTS_NOT_ALLOWED Handle = 0x8011045B + COMADMIN_E_AMBIGUOUS_APPLICATION_NAME Handle = 0x8011045C + COMADMIN_E_AMBIGUOUS_PARTITION_NAME Handle = 0x8011045D + COMADMIN_E_REGDB_NOTINITIALIZED Handle = 0x80110472 + COMADMIN_E_REGDB_NOTOPEN Handle = 0x80110473 + COMADMIN_E_REGDB_SYSTEMERR Handle = 0x80110474 + COMADMIN_E_REGDB_ALREADYRUNNING Handle = 0x80110475 + COMADMIN_E_MIG_VERSIONNOTSUPPORTED Handle = 0x80110480 + COMADMIN_E_MIG_SCHEMANOTFOUND Handle = 0x80110481 + COMADMIN_E_CAT_BITNESSMISMATCH Handle = 0x80110482 + COMADMIN_E_CAT_UNACCEPTABLEBITNESS Handle = 0x80110483 + COMADMIN_E_CAT_WRONGAPPBITNESS Handle = 0x80110484 + COMADMIN_E_CAT_PAUSE_RESUME_NOT_SUPPORTED Handle = 0x80110485 + COMADMIN_E_CAT_SERVERFAULT Handle = 0x80110486 + COMQC_E_APPLICATION_NOT_QUEUED Handle = 0x80110600 + COMQC_E_NO_QUEUEABLE_INTERFACES Handle = 0x80110601 + COMQC_E_QUEUING_SERVICE_NOT_AVAILABLE Handle = 0x80110602 + COMQC_E_NO_IPERSISTSTREAM Handle = 0x80110603 + COMQC_E_BAD_MESSAGE Handle = 0x80110604 + COMQC_E_UNAUTHENTICATED Handle = 0x80110605 + COMQC_E_UNTRUSTED_ENQUEUER Handle = 0x80110606 + MSDTC_E_DUPLICATE_RESOURCE Handle = 0x80110701 + COMADMIN_E_OBJECT_PARENT_MISSING Handle = 0x80110808 + COMADMIN_E_OBJECT_DOES_NOT_EXIST Handle = 0x80110809 + COMADMIN_E_APP_NOT_RUNNING Handle = 0x8011080A + COMADMIN_E_INVALID_PARTITION Handle = 0x8011080B + COMADMIN_E_SVCAPP_NOT_POOLABLE_OR_RECYCLABLE Handle = 0x8011080D + COMADMIN_E_USER_IN_SET Handle = 0x8011080E + COMADMIN_E_CANTRECYCLELIBRARYAPPS Handle = 0x8011080F + COMADMIN_E_CANTRECYCLESERVICEAPPS Handle = 0x80110811 + 
COMADMIN_E_PROCESSALREADYRECYCLED Handle = 0x80110812 + COMADMIN_E_PAUSEDPROCESSMAYNOTBERECYCLED Handle = 0x80110813 + COMADMIN_E_CANTMAKEINPROCSERVICE Handle = 0x80110814 + COMADMIN_E_PROGIDINUSEBYCLSID Handle = 0x80110815 + COMADMIN_E_DEFAULT_PARTITION_NOT_IN_SET Handle = 0x80110816 + COMADMIN_E_RECYCLEDPROCESSMAYNOTBEPAUSED Handle = 0x80110817 + COMADMIN_E_PARTITION_ACCESSDENIED Handle = 0x80110818 + COMADMIN_E_PARTITION_MSI_ONLY Handle = 0x80110819 + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_1_0_FORMAT Handle = 0x8011081A + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_NONBASE_PARTITIONS Handle = 0x8011081B + COMADMIN_E_COMP_MOVE_SOURCE Handle = 0x8011081C + COMADMIN_E_COMP_MOVE_DEST Handle = 0x8011081D + COMADMIN_E_COMP_MOVE_PRIVATE Handle = 0x8011081E + COMADMIN_E_BASEPARTITION_REQUIRED_IN_SET Handle = 0x8011081F + COMADMIN_E_CANNOT_ALIAS_EVENTCLASS Handle = 0x80110820 + COMADMIN_E_PRIVATE_ACCESSDENIED Handle = 0x80110821 + COMADMIN_E_SAFERINVALID Handle = 0x80110822 + COMADMIN_E_REGISTRY_ACCESSDENIED Handle = 0x80110823 + COMADMIN_E_PARTITIONS_DISABLED Handle = 0x80110824 + WER_S_REPORT_DEBUG Handle = 0x001B0000 + WER_S_REPORT_UPLOADED Handle = 0x001B0001 + WER_S_REPORT_QUEUED Handle = 0x001B0002 + WER_S_DISABLED Handle = 0x001B0003 + WER_S_SUSPENDED_UPLOAD Handle = 0x001B0004 + WER_S_DISABLED_QUEUE Handle = 0x001B0005 + WER_S_DISABLED_ARCHIVE Handle = 0x001B0006 + WER_S_REPORT_ASYNC Handle = 0x001B0007 + WER_S_IGNORE_ASSERT_INSTANCE Handle = 0x001B0008 + WER_S_IGNORE_ALL_ASSERTS Handle = 0x001B0009 + WER_S_ASSERT_CONTINUE Handle = 0x001B000A + WER_S_THROTTLED Handle = 0x001B000B + WER_S_REPORT_UPLOADED_CAB Handle = 0x001B000C + WER_E_CRASH_FAILURE Handle = 0x801B8000 + WER_E_CANCELED Handle = 0x801B8001 + WER_E_NETWORK_FAILURE Handle = 0x801B8002 + WER_E_NOT_INITIALIZED Handle = 0x801B8003 + WER_E_ALREADY_REPORTING Handle = 0x801B8004 + WER_E_DUMP_THROTTLED Handle = 0x801B8005 + WER_E_INSUFFICIENT_CONSENT Handle = 0x801B8006 + WER_E_TOO_HEAVY Handle = 0x801B8007 + ERROR_FLT_IO_COMPLETE Handle = 0x001F0001 + ERROR_FLT_NO_HANDLER_DEFINED Handle = 0x801F0001 + ERROR_FLT_CONTEXT_ALREADY_DEFINED Handle = 0x801F0002 + ERROR_FLT_INVALID_ASYNCHRONOUS_REQUEST Handle = 0x801F0003 + ERROR_FLT_DISALLOW_FAST_IO Handle = 0x801F0004 + ERROR_FLT_INVALID_NAME_REQUEST Handle = 0x801F0005 + ERROR_FLT_NOT_SAFE_TO_POST_OPERATION Handle = 0x801F0006 + ERROR_FLT_NOT_INITIALIZED Handle = 0x801F0007 + ERROR_FLT_FILTER_NOT_READY Handle = 0x801F0008 + ERROR_FLT_POST_OPERATION_CLEANUP Handle = 0x801F0009 + ERROR_FLT_INTERNAL_ERROR Handle = 0x801F000A + ERROR_FLT_DELETING_OBJECT Handle = 0x801F000B + ERROR_FLT_MUST_BE_NONPAGED_POOL Handle = 0x801F000C + ERROR_FLT_DUPLICATE_ENTRY Handle = 0x801F000D + ERROR_FLT_CBDQ_DISABLED Handle = 0x801F000E + ERROR_FLT_DO_NOT_ATTACH Handle = 0x801F000F + ERROR_FLT_DO_NOT_DETACH Handle = 0x801F0010 + ERROR_FLT_INSTANCE_ALTITUDE_COLLISION Handle = 0x801F0011 + ERROR_FLT_INSTANCE_NAME_COLLISION Handle = 0x801F0012 + ERROR_FLT_FILTER_NOT_FOUND Handle = 0x801F0013 + ERROR_FLT_VOLUME_NOT_FOUND Handle = 0x801F0014 + ERROR_FLT_INSTANCE_NOT_FOUND Handle = 0x801F0015 + ERROR_FLT_CONTEXT_ALLOCATION_NOT_FOUND Handle = 0x801F0016 + ERROR_FLT_INVALID_CONTEXT_REGISTRATION Handle = 0x801F0017 + ERROR_FLT_NAME_CACHE_MISS Handle = 0x801F0018 + ERROR_FLT_NO_DEVICE_OBJECT Handle = 0x801F0019 + ERROR_FLT_VOLUME_ALREADY_MOUNTED Handle = 0x801F001A + ERROR_FLT_ALREADY_ENLISTED Handle = 0x801F001B + ERROR_FLT_CONTEXT_ALREADY_LINKED Handle = 0x801F001C + ERROR_FLT_NO_WAITER_FOR_REPLY Handle = 0x801F0020 + 
ERROR_FLT_REGISTRATION_BUSY Handle = 0x801F0023
+ ERROR_HUNG_DISPLAY_DRIVER_THREAD Handle = 0x80260001
+ DWM_E_COMPOSITIONDISABLED Handle = 0x80263001
+ DWM_E_REMOTING_NOT_SUPPORTED Handle = 0x80263002
+ DWM_E_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x80263003
+ DWM_E_NOT_QUEUING_PRESENTS Handle = 0x80263004
+ DWM_E_ADAPTER_NOT_FOUND Handle = 0x80263005
+ DWM_S_GDI_REDIRECTION_SURFACE Handle = 0x00263005
+ DWM_E_TEXTURE_TOO_LARGE Handle = 0x80263007
+ DWM_S_GDI_REDIRECTION_SURFACE_BLT_VIA_GDI Handle = 0x00263008
+ ERROR_MONITOR_NO_DESCRIPTOR Handle = 0x00261001
+ ERROR_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT Handle = 0x00261002
+ ERROR_MONITOR_INVALID_DESCRIPTOR_CHECKSUM Handle = 0xC0261003
+ ERROR_MONITOR_INVALID_STANDARD_TIMING_BLOCK Handle = 0xC0261004
+ ERROR_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED Handle = 0xC0261005
+ ERROR_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK Handle = 0xC0261006
+ ERROR_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK Handle = 0xC0261007
+ ERROR_MONITOR_NO_MORE_DESCRIPTOR_DATA Handle = 0xC0261008
+ ERROR_MONITOR_INVALID_DETAILED_TIMING_BLOCK Handle = 0xC0261009
+ ERROR_MONITOR_INVALID_MANUFACTURE_DATE Handle = 0xC026100A
+ ERROR_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER Handle = 0xC0262000
+ ERROR_GRAPHICS_INSUFFICIENT_DMA_BUFFER Handle = 0xC0262001
+ ERROR_GRAPHICS_INVALID_DISPLAY_ADAPTER Handle = 0xC0262002
+ ERROR_GRAPHICS_ADAPTER_WAS_RESET Handle = 0xC0262003
+ ERROR_GRAPHICS_INVALID_DRIVER_MODEL Handle = 0xC0262004
+ ERROR_GRAPHICS_PRESENT_MODE_CHANGED Handle = 0xC0262005
+ ERROR_GRAPHICS_PRESENT_OCCLUDED Handle = 0xC0262006
+ ERROR_GRAPHICS_PRESENT_DENIED Handle = 0xC0262007
+ ERROR_GRAPHICS_CANNOTCOLORCONVERT Handle = 0xC0262008
+ ERROR_GRAPHICS_DRIVER_MISMATCH Handle = 0xC0262009
+ ERROR_GRAPHICS_PARTIAL_DATA_POPULATED Handle = 0x4026200A
+ ERROR_GRAPHICS_PRESENT_REDIRECTION_DISABLED Handle = 0xC026200B
+ ERROR_GRAPHICS_PRESENT_UNOCCLUDED Handle = 0xC026200C
+ ERROR_GRAPHICS_WINDOWDC_NOT_AVAILABLE Handle = 0xC026200D
+ ERROR_GRAPHICS_WINDOWLESS_PRESENT_DISABLED Handle = 0xC026200E
+ ERROR_GRAPHICS_PRESENT_INVALID_WINDOW Handle = 0xC026200F
+ ERROR_GRAPHICS_PRESENT_BUFFER_NOT_BOUND Handle = 0xC0262010
+ ERROR_GRAPHICS_VAIL_STATE_CHANGED Handle = 0xC0262011
+ ERROR_GRAPHICS_NO_VIDEO_MEMORY Handle = 0xC0262100
+ ERROR_GRAPHICS_CANT_LOCK_MEMORY Handle = 0xC0262101
+ ERROR_GRAPHICS_ALLOCATION_BUSY Handle = 0xC0262102
+ ERROR_GRAPHICS_TOO_MANY_REFERENCES Handle = 0xC0262103
+ ERROR_GRAPHICS_TRY_AGAIN_LATER Handle = 0xC0262104
+ ERROR_GRAPHICS_TRY_AGAIN_NOW Handle = 0xC0262105
+ ERROR_GRAPHICS_ALLOCATION_INVALID Handle = 0xC0262106
+ ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE Handle = 0xC0262107
+ ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED Handle = 0xC0262108
+ ERROR_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION Handle = 0xC0262109
+ ERROR_GRAPHICS_INVALID_ALLOCATION_USAGE Handle = 0xC0262110
+ ERROR_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION Handle = 0xC0262111
+ ERROR_GRAPHICS_ALLOCATION_CLOSED Handle = 0xC0262112
+ ERROR_GRAPHICS_INVALID_ALLOCATION_INSTANCE Handle = 0xC0262113
+ ERROR_GRAPHICS_INVALID_ALLOCATION_HANDLE Handle = 0xC0262114
+ ERROR_GRAPHICS_WRONG_ALLOCATION_DEVICE Handle = 0xC0262115
+ ERROR_GRAPHICS_ALLOCATION_CONTENT_LOST Handle = 0xC0262116
+ ERROR_GRAPHICS_GPU_EXCEPTION_ON_DEVICE Handle = 0xC0262200
+ ERROR_GRAPHICS_SKIP_ALLOCATION_PREPARATION Handle = 0x40262201
+ ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY Handle = 0xC0262300
+ ERROR_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED Handle = 0xC0262301
+ ERROR_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED Handle = 0xC0262302
+ ERROR_GRAPHICS_INVALID_VIDPN Handle = 0xC0262303
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE Handle = 0xC0262304
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET Handle = 0xC0262305
+ ERROR_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED Handle = 0xC0262306
+ ERROR_GRAPHICS_MODE_NOT_PINNED Handle = 0x00262307
+ ERROR_GRAPHICS_INVALID_VIDPN_SOURCEMODESET Handle = 0xC0262308
+ ERROR_GRAPHICS_INVALID_VIDPN_TARGETMODESET Handle = 0xC0262309
+ ERROR_GRAPHICS_INVALID_FREQUENCY Handle = 0xC026230A
+ ERROR_GRAPHICS_INVALID_ACTIVE_REGION Handle = 0xC026230B
+ ERROR_GRAPHICS_INVALID_TOTAL_REGION Handle = 0xC026230C
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE Handle = 0xC0262310
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE Handle = 0xC0262311
+ ERROR_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET Handle = 0xC0262312
+ ERROR_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY Handle = 0xC0262313
+ ERROR_GRAPHICS_MODE_ALREADY_IN_MODESET Handle = 0xC0262314
+ ERROR_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET Handle = 0xC0262315
+ ERROR_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET Handle = 0xC0262316
+ ERROR_GRAPHICS_SOURCE_ALREADY_IN_SET Handle = 0xC0262317
+ ERROR_GRAPHICS_TARGET_ALREADY_IN_SET Handle = 0xC0262318
+ ERROR_GRAPHICS_INVALID_VIDPN_PRESENT_PATH Handle = 0xC0262319
+ ERROR_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY Handle = 0xC026231A
+ ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET Handle = 0xC026231B
+ ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE Handle = 0xC026231C
+ ERROR_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET Handle = 0xC026231D
+ ERROR_GRAPHICS_NO_PREFERRED_MODE Handle = 0x0026231E
+ ERROR_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET Handle = 0xC026231F
+ ERROR_GRAPHICS_STALE_MODESET Handle = 0xC0262320
+ ERROR_GRAPHICS_INVALID_MONITOR_SOURCEMODESET Handle = 0xC0262321
+ ERROR_GRAPHICS_INVALID_MONITOR_SOURCE_MODE Handle = 0xC0262322
+ ERROR_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN Handle = 0xC0262323
+ ERROR_GRAPHICS_MODE_ID_MUST_BE_UNIQUE Handle = 0xC0262324
+ ERROR_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION Handle = 0xC0262325
+ ERROR_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES Handle = 0xC0262326
+ ERROR_GRAPHICS_PATH_NOT_IN_TOPOLOGY Handle = 0xC0262327
+ ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE Handle = 0xC0262328
+ ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET Handle = 0xC0262329
+ ERROR_GRAPHICS_INVALID_MONITORDESCRIPTORSET Handle = 0xC026232A
+ ERROR_GRAPHICS_INVALID_MONITORDESCRIPTOR Handle = 0xC026232B
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET Handle = 0xC026232C
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET Handle = 0xC026232D
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE Handle = 0xC026232E
+ ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE Handle = 0xC026232F
+ ERROR_GRAPHICS_RESOURCES_NOT_RELATED Handle = 0xC0262330
+ ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE Handle = 0xC0262331
+ ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE Handle = 0xC0262332
+ ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET Handle = 0xC0262333
+ ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER Handle = 0xC0262334
+ ERROR_GRAPHICS_NO_VIDPNMGR Handle = 0xC0262335
+ ERROR_GRAPHICS_NO_ACTIVE_VIDPN Handle = 0xC0262336
+ ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY Handle = 0xC0262337
+ ERROR_GRAPHICS_MONITOR_NOT_CONNECTED Handle = 0xC0262338
+ ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY Handle = 0xC0262339
+ ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE Handle = 0xC026233A
+ ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE Handle = 0xC026233B
+ ERROR_GRAPHICS_INVALID_STRIDE Handle = 0xC026233C + 
ERROR_GRAPHICS_INVALID_PIXELFORMAT Handle = 0xC026233D + ERROR_GRAPHICS_INVALID_COLORBASIS Handle = 0xC026233E + ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE Handle = 0xC026233F + ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY Handle = 0xC0262340 + ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT Handle = 0xC0262341 + ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0xC0262342 + ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN Handle = 0xC0262343 + ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL Handle = 0xC0262344 + ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION Handle = 0xC0262345 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED Handle = 0xC0262346 + ERROR_GRAPHICS_INVALID_GAMMA_RAMP Handle = 0xC0262347 + ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED Handle = 0xC0262348 + ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED Handle = 0xC0262349 + ERROR_GRAPHICS_MODE_NOT_IN_MODESET Handle = 0xC026234A + ERROR_GRAPHICS_DATASET_IS_EMPTY Handle = 0x0026234B + ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET Handle = 0x0026234C + ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON Handle = 0xC026234D + ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE Handle = 0xC026234E + ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE Handle = 0xC026234F + ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS Handle = 0xC0262350 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED Handle = 0x00262351 + ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING Handle = 0xC0262352 + ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED Handle = 0xC0262353 + ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS Handle = 0xC0262354 + ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT Handle = 0xC0262355 + ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM Handle = 0xC0262356 + ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN Handle = 0xC0262357 + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT Handle = 0xC0262358 + ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED Handle = 0xC0262359 + ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION Handle = 0xC026235A + ERROR_GRAPHICS_INVALID_CLIENT_TYPE Handle = 0xC026235B + ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET Handle = 0xC026235C + ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED Handle = 0xC0262400 + ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED Handle = 0xC0262401 + ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS Handle = 0x4026242F + ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER Handle = 0xC0262430 + ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED Handle = 0xC0262431 + ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED Handle = 0xC0262432 + ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY Handle = 0xC0262433 + ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED Handle = 0xC0262434 + ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON Handle = 0xC0262435 + ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE Handle = 0xC0262436 + ERROR_GRAPHICS_LEADLINK_START_DEFERRED Handle = 0x40262437 + ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER Handle = 0xC0262438 + ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY Handle = 0x40262439 + ERROR_GRAPHICS_START_DEFERRED Handle = 0x4026243A + ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED Handle = 0xC026243B + ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS Handle = 0x4026243C + ERROR_GRAPHICS_OPM_NOT_SUPPORTED Handle = 0xC0262500 + ERROR_GRAPHICS_COPP_NOT_SUPPORTED Handle = 0xC0262501 + ERROR_GRAPHICS_UAB_NOT_SUPPORTED Handle = 0xC0262502 + ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS Handle = 0xC0262503 + ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST Handle = 0xC0262505 + ERROR_GRAPHICS_OPM_INTERNAL_ERROR Handle = 0xC026250B + ERROR_GRAPHICS_OPM_INVALID_HANDLE Handle = 0xC026250C + 
ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH Handle = 0xC026250E + ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED Handle = 0xC026250F + ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED Handle = 0xC0262510 + ERROR_GRAPHICS_PVP_HFS_FAILED Handle = 0xC0262511 + ERROR_GRAPHICS_OPM_INVALID_SRM Handle = 0xC0262512 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP Handle = 0xC0262513 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP Handle = 0xC0262514 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA Handle = 0xC0262515 + ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET Handle = 0xC0262516 + ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH Handle = 0xC0262517 + ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE Handle = 0xC0262518 + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS Handle = 0xC026251A + ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC026251B + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS Handle = 0xC026251C + ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST Handle = 0xC026251D + ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR Handle = 0xC026251E + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS Handle = 0xC026251F + ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED Handle = 0xC0262520 + ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST Handle = 0xC0262521 + ERROR_GRAPHICS_I2C_NOT_SUPPORTED Handle = 0xC0262580 + ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST Handle = 0xC0262581 + ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA Handle = 0xC0262582 + ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA Handle = 0xC0262583 + ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED Handle = 0xC0262584 + ERROR_GRAPHICS_DDCCI_INVALID_DATA Handle = 0xC0262585 + ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE Handle = 0xC0262586 + ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING Handle = 0xC0262587 + ERROR_GRAPHICS_MCA_INTERNAL_ERROR Handle = 0xC0262588 + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND Handle = 0xC0262589 + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH Handle = 0xC026258A + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM Handle = 0xC026258B + ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE Handle = 0xC026258C + ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS Handle = 0xC026258D + ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE Handle = 0xC02625D8 + ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION Handle = 0xC02625D9 + ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION Handle = 0xC02625DA + ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH Handle = 0xC02625DB + ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION Handle = 0xC02625DC + ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED Handle = 0xC02625DE + ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE Handle = 0xC02625DF + ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED Handle = 0xC02625E0 + ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME Handle = 0xC02625E1 + ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP Handle = 0xC02625E2 + ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED Handle = 0xC02625E3 + ERROR_GRAPHICS_INVALID_POINTER Handle = 0xC02625E4 + ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE Handle = 0xC02625E5 + ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL Handle = 0xC02625E6 + ERROR_GRAPHICS_INTERNAL_ERROR Handle = 0xC02625E7 + ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC02605E8 + NAP_E_INVALID_PACKET Handle = 0x80270001 + NAP_E_MISSING_SOH Handle = 0x80270002 + NAP_E_CONFLICTING_ID Handle = 0x80270003 + NAP_E_NO_CACHED_SOH Handle = 0x80270004 + NAP_E_STILL_BOUND Handle = 0x80270005 + NAP_E_NOT_REGISTERED Handle = 0x80270006 + 
NAP_E_NOT_INITIALIZED Handle = 0x80270007 + NAP_E_MISMATCHED_ID Handle = 0x80270008 + NAP_E_NOT_PENDING Handle = 0x80270009 + NAP_E_ID_NOT_FOUND Handle = 0x8027000A + NAP_E_MAXSIZE_TOO_SMALL Handle = 0x8027000B + NAP_E_SERVICE_NOT_RUNNING Handle = 0x8027000C + NAP_S_CERT_ALREADY_PRESENT Handle = 0x0027000D + NAP_E_ENTITY_DISABLED Handle = 0x8027000E + NAP_E_NETSH_GROUPPOLICY_ERROR Handle = 0x8027000F + NAP_E_TOO_MANY_CALLS Handle = 0x80270010 + NAP_E_SHV_CONFIG_EXISTED Handle = 0x80270011 + NAP_E_SHV_CONFIG_NOT_FOUND Handle = 0x80270012 + NAP_E_SHV_TIMEOUT Handle = 0x80270013 + TPM_E_ERROR_MASK Handle = 0x80280000 + TPM_E_AUTHFAIL Handle = 0x80280001 + TPM_E_BADINDEX Handle = 0x80280002 + TPM_E_BAD_PARAMETER Handle = 0x80280003 + TPM_E_AUDITFAILURE Handle = 0x80280004 + TPM_E_CLEAR_DISABLED Handle = 0x80280005 + TPM_E_DEACTIVATED Handle = 0x80280006 + TPM_E_DISABLED Handle = 0x80280007 + TPM_E_DISABLED_CMD Handle = 0x80280008 + TPM_E_FAIL Handle = 0x80280009 + TPM_E_BAD_ORDINAL Handle = 0x8028000A + TPM_E_INSTALL_DISABLED Handle = 0x8028000B + TPM_E_INVALID_KEYHANDLE Handle = 0x8028000C + TPM_E_KEYNOTFOUND Handle = 0x8028000D + TPM_E_INAPPROPRIATE_ENC Handle = 0x8028000E + TPM_E_MIGRATEFAIL Handle = 0x8028000F + TPM_E_INVALID_PCR_INFO Handle = 0x80280010 + TPM_E_NOSPACE Handle = 0x80280011 + TPM_E_NOSRK Handle = 0x80280012 + TPM_E_NOTSEALED_BLOB Handle = 0x80280013 + TPM_E_OWNER_SET Handle = 0x80280014 + TPM_E_RESOURCES Handle = 0x80280015 + TPM_E_SHORTRANDOM Handle = 0x80280016 + TPM_E_SIZE Handle = 0x80280017 + TPM_E_WRONGPCRVAL Handle = 0x80280018 + TPM_E_BAD_PARAM_SIZE Handle = 0x80280019 + TPM_E_SHA_THREAD Handle = 0x8028001A + TPM_E_SHA_ERROR Handle = 0x8028001B + TPM_E_FAILEDSELFTEST Handle = 0x8028001C + TPM_E_AUTH2FAIL Handle = 0x8028001D + TPM_E_BADTAG Handle = 0x8028001E + TPM_E_IOERROR Handle = 0x8028001F + TPM_E_ENCRYPT_ERROR Handle = 0x80280020 + TPM_E_DECRYPT_ERROR Handle = 0x80280021 + TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022 + TPM_E_NO_ENDORSEMENT Handle = 0x80280023 + TPM_E_INVALID_KEYUSAGE Handle = 0x80280024 + TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025 + TPM_E_INVALID_POSTINIT Handle = 0x80280026 + TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027 + TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028 + TPM_E_BAD_MIGRATION Handle = 0x80280029 + TPM_E_BAD_SCHEME Handle = 0x8028002A + TPM_E_BAD_DATASIZE Handle = 0x8028002B + TPM_E_BAD_MODE Handle = 0x8028002C + TPM_E_BAD_PRESENCE Handle = 0x8028002D + TPM_E_BAD_VERSION Handle = 0x8028002E + TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F + TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030 + TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031 + TPM_E_NOTRESETABLE Handle = 0x80280032 + TPM_E_NOTLOCAL Handle = 0x80280033 + TPM_E_BAD_TYPE Handle = 0x80280034 + TPM_E_INVALID_RESOURCE Handle = 0x80280035 + TPM_E_NOTFIPS Handle = 0x80280036 + TPM_E_INVALID_FAMILY Handle = 0x80280037 + TPM_E_NO_NV_PERMISSION Handle = 0x80280038 + TPM_E_REQUIRES_SIGN Handle = 0x80280039 + TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A + TPM_E_AUTH_CONFLICT Handle = 0x8028003B + TPM_E_AREA_LOCKED Handle = 0x8028003C + TPM_E_BAD_LOCALITY Handle = 0x8028003D + TPM_E_READ_ONLY Handle = 0x8028003E + TPM_E_PER_NOWRITE Handle = 0x8028003F + TPM_E_FAMILYCOUNT Handle = 0x80280040 + TPM_E_WRITE_LOCKED Handle = 0x80280041 + TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 + TPM_E_INVALID_STRUCTURE Handle = 0x80280043 + TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 + TPM_E_BAD_COUNTER Handle = 0x80280045 + TPM_E_NOT_FULLWRITE Handle = 0x80280046 + TPM_E_CONTEXT_GAP Handle = 0x80280047 + 
TPM_E_MAXNVWRITES Handle = 0x80280048 + TPM_E_NOOPERATOR Handle = 0x80280049 + TPM_E_RESOURCEMISSING Handle = 0x8028004A + TPM_E_DELEGATE_LOCK Handle = 0x8028004B + TPM_E_DELEGATE_FAMILY Handle = 0x8028004C + TPM_E_DELEGATE_ADMIN Handle = 0x8028004D + TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E + TPM_E_OWNER_CONTROL Handle = 0x8028004F + TPM_E_DAA_RESOURCES Handle = 0x80280050 + TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 + TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 + TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 + TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 + TPM_E_DAA_STAGE Handle = 0x80280055 + TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 + TPM_E_DAA_WRONG_W Handle = 0x80280057 + TPM_E_BAD_HANDLE Handle = 0x80280058 + TPM_E_BAD_DELEGATE Handle = 0x80280059 + TPM_E_BADCONTEXT Handle = 0x8028005A + TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B + TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C + TPM_E_MA_DESTINATION Handle = 0x8028005D + TPM_E_MA_SOURCE Handle = 0x8028005E + TPM_E_MA_AUTHORITY Handle = 0x8028005F + TPM_E_PERMANENTEK Handle = 0x80280061 + TPM_E_BAD_SIGNATURE Handle = 0x80280062 + TPM_E_NOCONTEXTSPACE Handle = 0x80280063 + TPM_20_E_ASYMMETRIC Handle = 0x80280081 + TPM_20_E_ATTRIBUTES Handle = 0x80280082 + TPM_20_E_HASH Handle = 0x80280083 + TPM_20_E_VALUE Handle = 0x80280084 + TPM_20_E_HIERARCHY Handle = 0x80280085 + TPM_20_E_KEY_SIZE Handle = 0x80280087 + TPM_20_E_MGF Handle = 0x80280088 + TPM_20_E_MODE Handle = 0x80280089 + TPM_20_E_TYPE Handle = 0x8028008A + TPM_20_E_HANDLE Handle = 0x8028008B + TPM_20_E_KDF Handle = 0x8028008C + TPM_20_E_RANGE Handle = 0x8028008D + TPM_20_E_AUTH_FAIL Handle = 0x8028008E + TPM_20_E_NONCE Handle = 0x8028008F + TPM_20_E_PP Handle = 0x80280090 + TPM_20_E_SCHEME Handle = 0x80280092 + TPM_20_E_SIZE Handle = 0x80280095 + TPM_20_E_SYMMETRIC Handle = 0x80280096 + TPM_20_E_TAG Handle = 0x80280097 + TPM_20_E_SELECTOR Handle = 0x80280098 + TPM_20_E_INSUFFICIENT Handle = 0x8028009A + TPM_20_E_SIGNATURE Handle = 0x8028009B + TPM_20_E_KEY Handle = 0x8028009C + TPM_20_E_POLICY_FAIL Handle = 0x8028009D + TPM_20_E_INTEGRITY Handle = 0x8028009F + TPM_20_E_TICKET Handle = 0x802800A0 + TPM_20_E_RESERVED_BITS Handle = 0x802800A1 + TPM_20_E_BAD_AUTH Handle = 0x802800A2 + TPM_20_E_EXPIRED Handle = 0x802800A3 + TPM_20_E_POLICY_CC Handle = 0x802800A4 + TPM_20_E_BINDING Handle = 0x802800A5 + TPM_20_E_CURVE Handle = 0x802800A6 + TPM_20_E_ECC_POINT Handle = 0x802800A7 + TPM_20_E_INITIALIZE Handle = 0x80280100 + TPM_20_E_FAILURE Handle = 0x80280101 + TPM_20_E_SEQUENCE Handle = 0x80280103 + TPM_20_E_PRIVATE Handle = 0x8028010B + TPM_20_E_HMAC Handle = 0x80280119 + TPM_20_E_DISABLED Handle = 0x80280120 + TPM_20_E_EXCLUSIVE Handle = 0x80280121 + TPM_20_E_ECC_CURVE Handle = 0x80280123 + TPM_20_E_AUTH_TYPE Handle = 0x80280124 + TPM_20_E_AUTH_MISSING Handle = 0x80280125 + TPM_20_E_POLICY Handle = 0x80280126 + TPM_20_E_PCR Handle = 0x80280127 + TPM_20_E_PCR_CHANGED Handle = 0x80280128 + TPM_20_E_UPGRADE Handle = 0x8028012D + TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E + TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F + TPM_20_E_REBOOT Handle = 0x80280130 + TPM_20_E_UNBALANCED Handle = 0x80280131 + TPM_20_E_COMMAND_SIZE Handle = 0x80280142 + TPM_20_E_COMMAND_CODE Handle = 0x80280143 + TPM_20_E_AUTHSIZE Handle = 0x80280144 + TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 + TPM_20_E_NV_RANGE Handle = 0x80280146 + TPM_20_E_NV_SIZE Handle = 0x80280147 + TPM_20_E_NV_LOCKED Handle = 0x80280148 + TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 + TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A 
+ TPM_20_E_NV_SPACE Handle = 0x8028014B + TPM_20_E_NV_DEFINED Handle = 0x8028014C + TPM_20_E_BAD_CONTEXT Handle = 0x80280150 + TPM_20_E_CPHASH Handle = 0x80280151 + TPM_20_E_PARENT Handle = 0x80280152 + TPM_20_E_NEEDS_TEST Handle = 0x80280153 + TPM_20_E_NO_RESULT Handle = 0x80280154 + TPM_20_E_SENSITIVE Handle = 0x80280155 + TPM_E_COMMAND_BLOCKED Handle = 0x80280400 + TPM_E_INVALID_HANDLE Handle = 0x80280401 + TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 + TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 + TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 + TPM_E_RETRY Handle = 0x80280800 + TPM_E_NEEDS_SELFTEST Handle = 0x80280801 + TPM_E_DOING_SELFTEST Handle = 0x80280802 + TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 + TPM_20_E_CONTEXT_GAP Handle = 0x80280901 + TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 + TPM_20_E_SESSION_MEMORY Handle = 0x80280903 + TPM_20_E_MEMORY Handle = 0x80280904 + TPM_20_E_SESSION_HANDLES Handle = 0x80280905 + TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 + TPM_20_E_LOCALITY Handle = 0x80280907 + TPM_20_E_YIELDED Handle = 0x80280908 + TPM_20_E_CANCELED Handle = 0x80280909 + TPM_20_E_TESTING Handle = 0x8028090A + TPM_20_E_NV_RATE Handle = 0x80280920 + TPM_20_E_LOCKOUT Handle = 0x80280921 + TPM_20_E_RETRY Handle = 0x80280922 + TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 + TBS_E_INTERNAL_ERROR Handle = 0x80284001 + TBS_E_BAD_PARAMETER Handle = 0x80284002 + TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 + TBS_E_INVALID_CONTEXT Handle = 0x80284004 + TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 + TBS_E_IOERROR Handle = 0x80284006 + TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 + TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 + TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 + TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A + TBS_E_SERVICE_START_PENDING Handle = 0x8028400B + TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C + TBS_E_COMMAND_CANCELED Handle = 0x8028400D + TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E + TBS_E_TPM_NOT_FOUND Handle = 0x8028400F + TBS_E_SERVICE_DISABLED Handle = 0x80284010 + TBS_E_NO_EVENT_LOG Handle = 0x80284011 + TBS_E_ACCESS_DENIED Handle = 0x80284012 + TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 + TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 + TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 + TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 + TPMAPI_E_INVALID_STATE Handle = 0x80290100 + TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 + TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 + TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 + TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 + TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 + TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 + TPMAPI_E_INTERNAL_ERROR Handle = 0x80290107 + TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 + TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 + TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A + TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B + TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C + TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D + TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E + TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F + TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 + TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 + TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 + TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 + TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 + TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 + TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 + TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 + 
TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 + TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 + TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A + TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B + TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C + TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D + TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E + TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F + TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 + TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 + TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 + TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 + TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 + TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 + TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 + TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 + TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 + TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 + TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A + TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B + TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C + TBSIMP_E_BUFFER_TOO_SMALL Handle = 0x80290200 + TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 + TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 + TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 + TBSIMP_E_TPM_ERROR Handle = 0x80290204 + TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 + TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 + TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 + TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 + TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 + TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A + TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B + TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C + TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D + TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E + TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F + TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 + TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 + TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 + TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 + TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 + TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 + TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 + TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 + TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 + TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 + TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A + TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B + TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 + TPM_E_PPI_USER_ABORT Handle = 0x80290301 + TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 + TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 + TPM_E_PPI_BLOCKED_IN_BIOS Handle = 0x80290304 + TPM_E_PCP_ERROR_MASK Handle = 0x80290400 + TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 + TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 + TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 + TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 + TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 + TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 + TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 + TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 + TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 + TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A + TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B + TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C + TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E + TPM_E_KEY_NOT_LOADED Handle = 0x8029040F + TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 + TPM_E_KEY_NOT_FINALIZED Handle = 
0x80290411 + TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 + TPM_E_NOT_PCR_BOUND Handle = 0x80290413 + TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 + TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 + TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 + TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 + TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 + TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 + TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A + TPM_E_LOCKED_OUT Handle = 0x8029041B + TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C + TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D + TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E + TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F + TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 + TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 + TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 + TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 + TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 + PLA_E_DCS_NOT_FOUND Handle = 0x80300002 + PLA_E_DCS_IN_USE Handle = 0x803000AA + PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 + PLA_E_NO_MIN_DISK Handle = 0x80300070 + PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 + PLA_S_PROPERTY_IGNORED Handle = 0x00300100 + PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 + PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 + PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 + PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 + PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 + PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 + PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 + PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 + PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 + PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A + PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B + PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C + PLA_E_NO_DUPLICATES Handle = 0x8030010D + PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E + PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F + PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 + PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 + PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 + PLA_E_CABAPI_FAILURE Handle = 0x80300113 + FVE_E_LOCKED_VOLUME Handle = 0x80310000 + FVE_E_NOT_ENCRYPTED Handle = 0x80310001 + FVE_E_NO_TPM_BIOS Handle = 0x80310002 + FVE_E_NO_MBR_METRIC Handle = 0x80310003 + FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 + FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 + FVE_E_WRONG_BOOTMGR Handle = 0x80310006 + FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 + FVE_E_NOT_ACTIVATED Handle = 0x80310008 + FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 + FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A + FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B + FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C + FVE_E_AD_NO_VALUES Handle = 0x8031000D + FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E + FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F + FVE_E_BAD_INFORMATION Handle = 0x80310010 + FVE_E_TOO_SMALL Handle = 0x80310011 + FVE_E_SYSTEM_VOLUME Handle = 0x80310012 + FVE_E_FAILED_WRONG_FS Handle = 0x80310013 + FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 + FVE_E_NOT_SUPPORTED Handle = 0x80310015 + FVE_E_BAD_DATA Handle = 0x80310016 + FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 + FVE_E_TPM_NOT_OWNED Handle = 0x80310018 + FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 + FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A + FVE_E_CONV_READ Handle = 0x8031001B + FVE_E_CONV_WRITE Handle = 0x8031001C + FVE_E_KEY_REQUIRED Handle = 0x8031001D + FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E + FVE_E_VOLUME_BOUND_ALREADY 
Handle = 0x8031001F + FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 + FVE_E_PROTECTION_DISABLED Handle = 0x80310021 + FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 + FVE_E_FOREIGN_VOLUME Handle = 0x80310023 + FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 + FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 + FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 + FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 + FVE_E_NOT_OS_VOLUME Handle = 0x80310028 + FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 + FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A + FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B + FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C + FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D + FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E + FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 + FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 + FVE_E_RELATIVE_PATH Handle = 0x80310032 + FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 + FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 + FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 + FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 + FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 + FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 + FVE_E_NOT_DECRYPTED Handle = 0x80310039 + FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A + FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B + FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C + FVE_E_KEYFILE_INVALID Handle = 0x8031003D + FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E + FVE_E_TPM_DISABLED Handle = 0x8031003F + FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 + FVE_E_TPM_INVALID_PCR Handle = 0x80310041 + FVE_E_TPM_NO_VMK Handle = 0x80310042 + FVE_E_PIN_INVALID Handle = 0x80310043 + FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 + FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 + FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 + FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 + FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 + FVE_E_NO_LICENSE Handle = 0x80310049 + FVE_E_NOT_ON_STACK Handle = 0x8031004A + FVE_E_FS_MOUNTED Handle = 0x8031004B + FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C + FVE_E_DRY_RUN_FAILED Handle = 0x8031004D + FVE_E_REBOOT_REQUIRED Handle = 0x8031004E + FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F + FVE_E_RAW_ACCESS Handle = 0x80310050 + FVE_E_RAW_BLOCKED Handle = 0x80310051 + FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 + FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 + FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 + FVE_E_MOR_FAILED Handle = 0x80310055 + FVE_E_HIDDEN_VOLUME Handle = 0x80310056 + FVE_E_TRANSIENT_STATE Handle = 0x80310057 + FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 + FVE_E_VOLUME_HANDLE_OPEN Handle = 0x80310059 + FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A + FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B + FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C + FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D + FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E + FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F + FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 + FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 + FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 + FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 + FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 + FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 + FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 + FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 + FVE_E_POLICY_INVALID_PIN_LENGTH 
Handle = 0x80310068 + FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 + FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A + FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B + FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C + FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D + FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E + FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F + FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 + FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 + FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 + FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 + FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 + FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 + FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 + FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 + FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 + FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 + FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 + FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 + FVE_E_RECOVERY_PARTITION Handle = 0x80310082 + FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 + FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 + FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 + FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 + FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 + FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 + FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 + FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 + FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 + FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 + FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 + FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 + FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 + FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 + FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 + FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 + FVE_E_ENH_PIN_INVALID Handle = 0x80310099 + FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A + FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B + FVE_E_EFI_ONLY Handle = 0x8031009C + FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D + FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E + FVE_E_INVALID_NKP_CERT Handle = 0x8031009F + FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 + FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 + FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 + FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 + FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 + FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 + FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 + FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 + FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 + FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 + FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA + FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB + FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC + FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD + FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE + FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF + FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 + FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 + FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 + FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING 
Handle = 0x803100B3 + FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 + FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 + FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 + FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 + FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 + FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 + FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA + FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB + FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC + FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD + FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE + FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF + FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 + FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 + FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 + FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 + FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 + FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 + FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 + FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 + FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 + FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 + FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA + FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB + FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC + FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD + FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE + FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF + FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 + FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 + FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 + FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 + FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 + FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 + FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 + FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 + FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 + FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 + FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 + FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 + FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 + FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 + FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 + FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 + FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 + FWP_E_NOT_FOUND Handle = 0x80320008 + FWP_E_ALREADY_EXISTS Handle = 0x80320009 + FWP_E_IN_USE Handle = 0x8032000A + FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B + FWP_E_WRONG_SESSION Handle = 0x8032000C + FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D + FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E + FWP_E_TXN_ABORTED Handle = 0x8032000F + FWP_E_SESSION_ABORTED Handle = 0x80320010 + FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 + FWP_E_TIMEOUT Handle = 0x80320012 + FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 + FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 + FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 + FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 + FWP_E_BUILTIN_OBJECT Handle = 0x80320017 + FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 + FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 + FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A + FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B + FWP_E_NULL_POINTER Handle = 0x8032001C + FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D + FWP_E_INVALID_FLAGS Handle = 0x8032001E + FWP_E_INVALID_NET_MASK 
Handle = 0x8032001F + FWP_E_INVALID_RANGE Handle = 0x80320020 + FWP_E_INVALID_INTERVAL Handle = 0x80320021 + FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 + FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 + FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 + FWP_E_INVALID_WEIGHT Handle = 0x80320025 + FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 + FWP_E_TYPE_MISMATCH Handle = 0x80320027 + FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 + FWP_E_RESERVED Handle = 0x80320029 + FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A + FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B + FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C + FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D + FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E + FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F + FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 + FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 + FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 + FWP_E_NEVER_MATCH Handle = 0x80320033 + FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 + FWP_E_INVALID_PARAMETER Handle = 0x80320035 + FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 + FWP_E_CALLOUT_NOTIFICATION_FAILED Handle = 0x80320037 + FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 + FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 + FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A + FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B + FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C + FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D + FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E + FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F + FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 + FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 + FWP_E_INVALID_DNS_NAME Handle = 0x80320042 + FWP_E_STILL_ON Handle = 0x80320043 + FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 + FWP_E_DROP_NOICMP Handle = 0x80320104 + WS_S_ASYNC Handle = 0x003D0000 + WS_S_END Handle = 0x003D0001 + WS_E_INVALID_FORMAT Handle = 0x803D0000 + WS_E_OBJECT_FAULTED Handle = 0x803D0001 + WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 + WS_E_INVALID_OPERATION Handle = 0x803D0003 + WS_E_OPERATION_ABORTED Handle = 0x803D0004 + WS_E_ENDPOINT_ACCESS_DENIED Handle = 0x803D0005 + WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 + WS_E_OPERATION_ABANDONED Handle = 0x803D0007 + WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 + WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 + WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A + WS_E_ADDRESS_IN_USE Handle = 0x803D000B + WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C + WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D + WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E + WS_E_ENDPOINT_FAILURE Handle = 0x803D000F + WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 + WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 + WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 + WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 + WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 + WS_E_PROXY_FAILURE Handle = 0x803D0015 + WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 + WS_E_NOT_SUPPORTED Handle = 0x803D0017 + WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 + WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 + WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A + WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B + WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C + WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D + WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E + WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F + WS_E_INVALID_ENDPOINT_URL 
Handle = 0x803D0020 + WS_E_OTHER Handle = 0x803D0021 + WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 + WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 + ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 + ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 + ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 + ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 + ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 + ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 + ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 + ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A + ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B + ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C + ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D + ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB + ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F + ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 + ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 + ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 + ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 + ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 + ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 + ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 + ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A + ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B + ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C + ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D + ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E + ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F + ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 + ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 + ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A + ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B + ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C + ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D + ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E + ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F + ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 + ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 + ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED syscall.Errno = 0x80342000 + ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 + ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 + ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 + ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 + ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 + ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 + ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 + ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 + ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 + ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F + ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 + ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 + ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 + ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 + ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 + ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 + ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 + ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 + ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 + ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 + ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE 
syscall.Errno = 0xC035000A + ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B + ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C + ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D + ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E + ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 + ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 + ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 + ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 + ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 + ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 + ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 + ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 + ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 + ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A + ERROR_HV_NO_DATA syscall.Errno = 0xC035001B + ERROR_HV_INACTIVE syscall.Errno = 0xC035001C + ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D + ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E + ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 + ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 + ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C + ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D + ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E + ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F + ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 + ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 + ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 + ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 + ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 + ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 + ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 + ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 + ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F + ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 + ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 + ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 + ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 + ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 + ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 + ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 + ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 + ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 + ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 + ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 + ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 + ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 + ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 + ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A + ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B + ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C + ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D + ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E + ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F + ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 + ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 + ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 + ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 + ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 + ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 
+ ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 + ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 + ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 + ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 + ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A + ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B + ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C + ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D + ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E + ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F + ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 + ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 + ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 + ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 + ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 + ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 + ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 + ERROR_VID_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370027 + ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 + ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 + ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A + ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 + ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 + ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 + ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 + ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 + ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 + ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 + ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 + ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 + ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 + ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A + ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B + ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C + ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 0xC037010D + ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E + ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F + ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 + ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 + ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 + ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 + HCS_E_TERMINATED_DURING_START Handle = 0x80370100 + HCS_E_IMAGE_MISMATCH Handle = 0x80370101 + HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 + HCS_E_INVALID_STATE Handle = 0x80370105 + HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 + HCS_E_TERMINATED Handle = 0x80370107 + HCS_E_CONNECT_FAILED Handle = 0x80370108 + HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 + HCS_E_CONNECTION_CLOSED Handle = 0x8037010A + HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B + HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C + HCS_E_INVALID_JSON Handle = 0x8037010D + HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E + HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F + HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 + HCS_E_PROTOCOL_ERROR Handle = 0x80370111 + HCS_E_INVALID_LAYER Handle = 0x80370112 + HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 + HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 + HCS_E_OPERATION_NOT_STARTED Handle = 
0x80370115 + HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 + HCS_E_OPERATION_PENDING Handle = 0x80370117 + HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 + HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 + HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A + HCS_E_ACCESS_DENIED Handle = 0x8037011B + HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C + ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 + ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 + WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 + WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 + WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 + WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 + WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 + WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 + WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 + WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 + WHV_E_INVALID_VP_STATE Handle = 0x80370308 + WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 + ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 + ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 + ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 + ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 + ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 + ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 + ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 + ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 + ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 + ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 + ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 + ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 + ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 + ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A + ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B + ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C + ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D + ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E + ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F + ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 + ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 + ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 + ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 + ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 + ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 + ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 + ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 + ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A + ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B + ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C + ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D + ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E + ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F + ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 + ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 + ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 
0xC0380022 + ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 + ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 + ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 + ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 + ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 + ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 + ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 + ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A + ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B + ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C + ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D + ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E + ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F + ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 + ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 + ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 + ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 + ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 + ERROR_VOLMGR_PACK_WITHOUT_QUORUM syscall.Errno = 0xC0380035 + ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036 + ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037 + ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 0xC0380038 + ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039 + ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A + ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B + ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C + ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D + ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E + ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F + ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040 + ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041 + ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042 + ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043 + ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044 + ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045 + ERROR_VOLMGR_VOLUME_ID_INVALID syscall.Errno = 0xC0380046 + ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047 + ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048 + ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049 + ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A + ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B + ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C + ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D + ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E + ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F + ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050 + ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051 + ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052 + ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055 + ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056 + ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057 + ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058 + ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059 + ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A + ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno 
= 0xC038005B + ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C + ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 + ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 + ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 + ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 + ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 + ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 + ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 + ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 + ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 + ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 + ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 + ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 + ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A + ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B + ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C + ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D + ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E + ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F + ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 + ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011 + ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 + ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 + ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 + ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 + ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 + ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 + ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 + ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 + ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A + ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B + ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C + ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D + ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E + ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F + ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 + ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 + ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 + ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 + ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 + ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 + ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 + ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 + ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 + ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 + ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A + ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 + ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 + HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 + HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 + HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 + HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 + HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 + HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 + HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 + HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 + HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 + HCN_E_INVALID_NETWORK Handle = 0x803B000A + 
HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B + HCN_E_INVALID_ENDPOINT Handle = 0x803B000C + HCN_E_INVALID_POLICY Handle = 0x803B000D + HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E + HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F + HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 + HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 + HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 + HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 + HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 + HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 + HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 + HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 + HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 + HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 + HCN_E_REGKEY_FAILURE Handle = 0x803B001A + HCN_E_INVALID_JSON Handle = 0x803B001B + HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C + HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D + HCN_E_INVALID_IP Handle = 0x803B001E + HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F + HCN_E_MANAGER_STOPPED Handle = 0x803B0020 + GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 + GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 + GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 + GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 + GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 + GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 + GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 + GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 + GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 + SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 + SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 + SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 + SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 + SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 + SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 + SDIAG_E_DISABLED syscall.Errno = 0x803C0106 + SDIAG_E_TRUST syscall.Errno = 0x803C0107 + SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 + SDIAG_E_VERSION syscall.Errno = 0x803C0109 + SDIAG_E_RESOURCE syscall.Errno = 0x803C010A + SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B + WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 + WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 + WPN_E_INVALID_APP Handle = 0x803E0102 + WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 + WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 + WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 + WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 + WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 + WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 + WPN_E_CLOUD_DISABLED Handle = 0x803E0109 + WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 + WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A + WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B + WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C + WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 + WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 + WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 + WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 + WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 + WPN_E_TAG_SIZE Handle = 0x803E0116 + WPN_E_ACCESS_DENIED Handle = 0x803E0117 + WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 + WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 + WPN_E_DEV_ID_SIZE Handle = 0x803E0120 + WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A + WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B + WPN_E_OUT_OF_SESSION Handle = 0x803E0200 + WPN_E_POWER_SAVE Handle = 0x803E0201 + WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 + WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 + 
WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 + WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 + WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 + WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 + WPN_E_STORAGE_LOCKED Handle = 0x803E0208 + WPN_E_GROUP_SIZE Handle = 0x803E0209 + WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A + WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B + E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 + E_MBN_BAD_SIM Handle = 0x80548202 + E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 + E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 + E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 + E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 + E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 + E_MBN_RADIO_POWER_OFF Handle = 0x80548208 + E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 + E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A + E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B + E_MBN_INVALID_CACHE Handle = 0x8054820C + E_MBN_NOT_REGISTERED Handle = 0x8054820D + E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E + E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F + E_MBN_PIN_REQUIRED Handle = 0x80548210 + E_MBN_PIN_DISABLED Handle = 0x80548211 + E_MBN_FAILURE Handle = 0x80548212 + E_MBN_INVALID_PROFILE Handle = 0x80548218 + E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 + E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 + E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 + E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 + E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 + E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 + E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 + E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 + E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 + E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 + E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 + PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 + PEER_E_NOT_INITIALIZED Handle = 0x80630002 + PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 + PEER_E_NOT_LICENSED Handle = 0x80630004 + PEER_E_INVALID_GRAPH Handle = 0x80630010 + PEER_E_DBNAME_CHANGED Handle = 0x80630011 + PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 + PEER_E_GRAPH_NOT_READY Handle = 0x80630013 + PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 + PEER_E_GRAPH_IN_USE Handle = 0x80630015 + PEER_E_INVALID_DATABASE Handle = 0x80630016 + PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 + PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 + PEER_E_CONNECT_SELF Handle = 0x80630106 + PEER_E_ALREADY_LISTENING Handle = 0x80630107 + PEER_E_NODE_NOT_FOUND Handle = 0x80630108 + PEER_E_CONNECTION_FAILED Handle = 0x80630109 + PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A + PEER_E_CONNECTION_REFUSED Handle = 0x8063010B + PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 + PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 + PEER_E_NO_KEY_ACCESS Handle = 0x80630203 + PEER_E_GROUPS_EXIST Handle = 0x80630204 + PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 + PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 + PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 + PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 + PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 + PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 + PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 + PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 + PEER_E_INVALID_SEARCH Handle = 0x80630601 + PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 + PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 + PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 + PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 + 
PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 + PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 + PEER_E_NO_CLOUD Handle = 0x80631001 + PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 + PEER_E_INVALID_RECORD Handle = 0x80632010 + PEER_E_NOT_AUTHORIZED Handle = 0x80632020 + PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 + PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 + PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 + PEER_E_INVALID_PEER_NAME Handle = 0x80632050 + PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 + PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 + PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 + PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 + PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 + PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 + PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 + PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 + PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 + PEER_E_GROUP_NOT_READY Handle = 0x80632091 + PEER_E_GROUP_IN_USE Handle = 0x80632092 + PEER_E_INVALID_GROUP Handle = 0x80632093 + PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 + PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095 + PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 + PEER_E_IDENTITY_DELETED Handle = 0x806320A0 + PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 + PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 + PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 + PEER_S_NO_EVENT_DATA Handle = 0x00630002 + PEER_S_ALREADY_CONNECTED Handle = 0x00632000 + PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 + PEER_S_NO_CONNECTIVITY Handle = 0x00630005 + PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 + PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 + PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 + PEER_E_NO_MORE Handle = 0x80634003 + PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 + PEER_E_INVITE_CANCELLED Handle = 0x80637000 + PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 + PEER_E_NOT_SIGNED_IN Handle = 0x80637003 + PEER_E_PRIVACY_DECLINED Handle = 0x80637004 + PEER_E_TIMEOUT Handle = 0x80637005 + PEER_E_INVALID_ADDRESS Handle = 0x80637007 + PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 + PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 + PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A + PEER_E_FW_DECLINED Handle = 0x8063700B + UI_E_CREATE_FAILED Handle = 0x802A0001 + UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 + UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 + UI_E_OBJECT_SEALED Handle = 0x802A0004 + UI_E_VALUE_NOT_SET Handle = 0x802A0005 + UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 + UI_E_INVALID_OUTPUT Handle = 0x802A0007 + UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 + UI_E_DIFFERENT_OWNER Handle = 0x802A0009 + UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A + UI_E_FP_OVERFLOW Handle = 0x802A000B + UI_E_WRONG_THREAD Handle = 0x802A000C + UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 + UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 + UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 + UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 + UI_E_LOOPS_OVERLAP Handle = 0x802A0105 + UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 + UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 + UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 + UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 + UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A + UI_E_INVALID_DIMENSION Handle = 0x802A010B + UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C + UI_E_WINDOW_CLOSED Handle = 0x802A0201 + E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 + E_BLUETOOTH_ATT_READ_NOT_PERMITTED 
Handle = 0x80650002 + E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 + E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 + E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 + E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 + E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C + E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D + E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F + E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 + E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 + E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 + E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 + E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 + E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 + E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 + E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 + STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 + STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 + STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 + STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 + STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 + STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 + STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 + STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 + STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A + STATEREPOSITORY_E_TRANSACTION_REQUIRED Handle = 0x8067000B + STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C + STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D + STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F + STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 + STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 + STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 + STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 + STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 + ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 + ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 + ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 + ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 + ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 + ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 + ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 + ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 + ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 + ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A + ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B + ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C + ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D + ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E + ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F + ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 + ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 + ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 
0x80E70012 + ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 + ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 + ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 + ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 + ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 + ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 + ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 + ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 + ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 + ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 + ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 + ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 + ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 + ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A + ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 + ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 + ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 + ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 + ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 + ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 + ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 + ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 + ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 + ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 + ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A + DXGI_STATUS_OCCLUDED Handle = 0x087A0001 + DXGI_STATUS_CLIPPED Handle = 0x087A0002 + DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 + DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 + DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 + DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 + DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 + DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 + DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 + DXGI_ERROR_MORE_DATA Handle = 0x887A0003 + DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 + DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 + DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 + DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 + DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A + DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B + DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C + DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 + DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 + DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 + DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 + DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 + DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 + DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 + DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 + DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 + DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A + DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B + DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C + DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D + DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E + DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 + DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 + DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 + DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 + DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A + DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 + DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F + DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 + DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 + DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 + DXGI_ERROR_ALREADY_EXISTS Handle = 
0x887A0036 + DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 + DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 + DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 + D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 + D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 + D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 + D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 + D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 + D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 + D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 + D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 + D2DERR_WRONG_STATE Handle = 0x88990001 + D2DERR_NOT_INITIALIZED Handle = 0x88990002 + D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 + D2DERR_SCANNER_FAILED Handle = 0x88990004 + D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 + D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 + D2DERR_ZERO_VECTOR Handle = 0x88990007 + D2DERR_INTERNAL_ERROR Handle = 0x88990008 + D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 + D2DERR_INVALID_CALL Handle = 0x8899000A + D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B + D2DERR_RECREATE_TARGET Handle = 0x8899000C + D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D + D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E + D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F + D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 + D2DERR_BAD_NUMBER Handle = 0x88990011 + D2DERR_WRONG_FACTORY Handle = 0x88990012 + D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 + D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 + D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 + D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 + D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 + D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 + D2DERR_WIN32_ERROR Handle = 0x88990019 + D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A + D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B + D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C + D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D + D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E + D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F + D2DERR_CYCLIC_GRAPH Handle = 0x88990020 + D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 + D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 + D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 + D2DERR_INVALID_TARGET Handle = 0x88990024 + D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 + D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 + D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 + D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 + D2DERR_INVALID_PROPERTY Handle = 0x88990029 + D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A + D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B + D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C + D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D + D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E + DWRITE_E_FILEFORMAT Handle = 0x88985000 + DWRITE_E_UNEXPECTED Handle = 0x88985001 + DWRITE_E_NOFONT Handle = 0x88985002 + DWRITE_E_FILENOTFOUND Handle = 0x88985003 + DWRITE_E_FILEACCESS Handle = 0x88985004 + DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 + DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 + DWRITE_E_CACHEFORMAT Handle = 0x88985007 + DWRITE_E_CACHEVERSION Handle = 0x88985008 + DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 + DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A + DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B + DWRITE_E_NOCOLOR Handle = 
0x8898500C + DWRITE_E_REMOTEFONT Handle = 0x8898500D + DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E + DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F + DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 + WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 + WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 + WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 + WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B + WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C + WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D + WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 + WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 + WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 + WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 + WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 + WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 + WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 + WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 + WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 + WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 + WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 + WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 + WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 + WINCODEC_ERR_BADHEADER Handle = 0x88982F61 + WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 + WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 + WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 + WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 + WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 + WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 + WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 + WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 + WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A + WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B + WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C + WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle = 0x88982F8D + WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E + WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F + WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 + WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 + WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 + WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 + WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 + WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 + WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 + MILERR_OBJECTBUSY Handle = 0x88980001 + MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 + MILERR_WIN32ERROR Handle = 0x88980003 + MILERR_SCANNER_FAILED Handle = 0x88980004 + MILERR_SCREENACCESSDENIED Handle = 0x88980005 + MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 + MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 + MILERR_ZEROVECTOR Handle = 0x88980008 + MILERR_TERMINATED Handle = 0x88980009 + MILERR_BADNUMBER Handle = 0x8898000A + MILERR_INTERNALERROR Handle = 0x88980080 + MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 + MILERR_INVALIDCALL Handle = 0x88980085 + MILERR_ALREADYLOCKED Handle = 0x88980086 + MILERR_NOTLOCKED Handle = 0x88980087 + MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 + MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 + MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A + MILERR_GENERIC_IGNORE Handle = 0x8898008B + MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C + MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D + MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E + MILERR_ALREADY_INITIALIZED Handle = 0x8898008F + MILERR_MISMATCHED_SIZE Handle = 0x88980090 + MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 + 
MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 + MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 + MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 + MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 + MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 + MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 + MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 + MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 + MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A + MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B + MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D + MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E + MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F + MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 + MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 + UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 + UCEERR_UNKNOWNPACKET Handle = 0x88980401 + UCEERR_ILLEGALPACKET Handle = 0x88980402 + UCEERR_MALFORMEDPACKET Handle = 0x88980403 + UCEERR_ILLEGALHANDLE Handle = 0x88980404 + UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 + UCEERR_RENDERTHREADFAILURE Handle = 0x88980406 + UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 + UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 + UCEERR_BLOCKSFULL Handle = 0x88980409 + UCEERR_MEMORYFAILURE Handle = 0x8898040A + UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B + UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C + UCEERR_OUTOFHANDLES Handle = 0x8898040D + UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E + UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F + UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 + UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 + UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 + UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 + UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 + UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 + UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 + UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 + UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 + UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 + UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 + UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 + UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 + UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 + MILAVERR_NOCLOCK Handle = 0x88980500 + MILAVERR_NOMEDIATYPE Handle = 0x88980501 + MILAVERR_NOVIDEOMIXER Handle = 0x88980502 + MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 + MILAVERR_NOREADYFRAMES Handle = 0x88980504 + MILAVERR_MODULENOTLOADED Handle = 0x88980505 + MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 + MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 + MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 + MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 + MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A + MILAVERR_SEEKFAILED Handle = 0x8898050B + MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C + MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D + MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E + MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E + MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F + MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 + MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 + MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 + MILEFFECTSERR_RESERVED Handle = 0x88980613 + MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 + MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 + MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 + MILEFFECTSERR_EFFECTHASNOCHILDREN 
Handle = 0x88980617 + MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 + MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 + MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A + MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B + DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 + DWMERR_THEME_FAILED Handle = 0x88980701 + DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 + DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 + DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 + DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 + ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 + ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 + ONL_E_INVALID_APPLICATION Handle = 0x80860003 + ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 + ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 + ONL_E_FORCESIGNIN Handle = 0x80860006 + ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 + ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 + ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 + ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A + ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B + ONL_E_ACTION_REQUIRED Handle = 0x8086000C + ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D + ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E + ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F + ONL_E_REQUEST_THROTTLED Handle = 0x80860010 + FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 + FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 + E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 + E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 + E_UAC_DISABLED Handle = 0x80270252 + E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 + E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 + E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 + E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 0x80270256 + E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 + S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 + S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 + E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A + E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B + E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C + E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D + E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 + E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 + E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 + E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 + E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 + E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 + E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 + E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 + E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 + E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 + E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 + E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 + E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 + E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 + E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 + E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 + E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 + E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 + E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 + E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 + E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 
0x8802D003 + E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 + E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 + E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 + E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 + E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 + E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 + E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A + E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B + EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 + EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 + EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 + EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 + EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 + EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 + EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 + EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008 + EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A + EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C + EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D + WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 + WEB_E_INVALID_XML Handle = 0x83750002 + WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 + WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 + WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 + WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 + WEB_E_INVALID_JSON_STRING Handle = 0x83750007 + WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 + WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 + HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 + HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 + HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 + HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 + HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C + HTTP_E_STATUS_MOVED Handle = 0x8019012D + HTTP_E_STATUS_REDIRECT Handle = 0x8019012E + HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F + HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 + HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 + HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 + HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 + HTTP_E_STATUS_DENIED Handle = 0x80190191 + HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 + HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 + HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 + HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 + HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 + HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 + HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 + HTTP_E_STATUS_CONFLICT Handle = 0x80190199 + HTTP_E_STATUS_GONE Handle = 0x8019019A + HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B + HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C + HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D + HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E + HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F + HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 + HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 + HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 + HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 + HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 + HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 + HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 + HTTP_E_STATUS_VERSION_NOT_SUP Handle = 
0x801901F9 + E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 + E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 + E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 + E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 + E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 + INPUT_E_OUT_OF_ORDER Handle = 0x80400000 + INPUT_E_REENTRANCY Handle = 0x80400001 + INPUT_E_MULTIMODAL Handle = 0x80400002 + INPUT_E_PACKET Handle = 0x80400003 + INPUT_E_FRAME Handle = 0x80400004 + INPUT_E_HISTORY Handle = 0x80400005 + INPUT_E_DEVICE_INFO Handle = 0x80400006 + INPUT_E_TRANSFORM Handle = 0x80400007 + INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 + INET_E_INVALID_URL Handle = 0x800C0002 + INET_E_NO_SESSION Handle = 0x800C0003 + INET_E_CANNOT_CONNECT Handle = 0x800C0004 + INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 + INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 + INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 + INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 + INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 + INET_E_NO_VALID_MEDIA Handle = 0x800C000A + INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B + INET_E_INVALID_REQUEST Handle = 0x800C000C + INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D + INET_E_SECURITY_PROBLEM Handle = 0x800C000E + INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F + INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 + INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 + INET_E_REDIRECT_FAILED Handle = 0x800C0014 + INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 + ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 + ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 + ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 + ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 + ERROR_IO_PREEMPTED Handle = 0x89010001 + JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 + WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x88010001 + WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 + WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 + WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 + WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 + WEP_E_NO_LICENSE Handle = 0x88010006 + WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 + WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 + WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 + ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 + ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 + ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 + ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 + ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 + ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 + ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 + ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 + ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 + ERROR_VHD_SHARED Handle = 0xC05CFF0A + ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B + ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C + ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 + ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 + WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 + WININET_E_TIMEOUT Handle = 0x80072EE2 + WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 + WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 + WININET_E_INVALID_URL Handle = 0x80072EE5 + WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 + WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 + WININET_E_PROTOCOL_NOT_FOUND Handle 
= 0x80072EE8 + WININET_E_INVALID_OPTION Handle = 0x80072EE9 + WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA + WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB + WININET_E_SHUTDOWN Handle = 0x80072EEC + WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED + WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE + WININET_E_LOGIN_FAILURE Handle = 0x80072EEF + WININET_E_INVALID_OPERATION Handle = 0x80072EF0 + WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 + WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 + WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 + WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 + WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 + WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 + WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 + WININET_E_NO_CONTEXT Handle = 0x80072EF8 + WININET_E_NO_CALLBACK Handle = 0x80072EF9 + WININET_E_REQUEST_PENDING Handle = 0x80072EFA + WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB + WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC + WININET_E_CANNOT_CONNECT Handle = 0x80072EFD + WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE + WININET_E_CONNECTION_RESET Handle = 0x80072EFF + WININET_E_FORCE_RETRY Handle = 0x80072F00 + WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 + WININET_E_NEED_UI Handle = 0x80072F02 + WININET_E_HANDLE_EXISTS Handle = 0x80072F04 + WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 + WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 + WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 + WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 + WININET_E_MIXED_SECURITY Handle = 0x80072F09 + WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A + WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B + WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C + WININET_E_INVALID_CA Handle = 0x80072F0D + WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E + WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F + WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 + WININET_E_DIALOG_PENDING Handle = 0x80072F11 + WININET_E_RETRY_DIALOG Handle = 0x80072F12 + WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 + WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 + WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 + WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 + WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 + WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 + WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 + WININET_E_INVALID_HEADER Handle = 0x80072F79 + WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A + WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B + WININET_E_REDIRECT_FAILED Handle = 0x80072F7C + WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D + WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E + WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F + WININET_E_DISCONNECTED Handle = 0x80072F83 + WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 + WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 + WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 + WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 + WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 + WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A + WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B + WININET_E_NOT_INITIALIZED Handle = 0x80072F8C + WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E + WININET_E_DECODING_FAILED Handle = 0x80072F8F + WININET_E_NOT_REDIRECTED Handle = 0x80072F80 + WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 + WININET_E_COOKIE_DECLINED Handle = 0x80072F82 + 
WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 + SQLITE_E_ERROR Handle = 0x87AF0001 + SQLITE_E_INTERNAL Handle = 0x87AF0002 + SQLITE_E_PERM Handle = 0x87AF0003 + SQLITE_E_ABORT Handle = 0x87AF0004 + SQLITE_E_BUSY Handle = 0x87AF0005 + SQLITE_E_LOCKED Handle = 0x87AF0006 + SQLITE_E_NOMEM Handle = 0x87AF0007 + SQLITE_E_READONLY Handle = 0x87AF0008 + SQLITE_E_INTERRUPT Handle = 0x87AF0009 + SQLITE_E_IOERR Handle = 0x87AF000A + SQLITE_E_CORRUPT Handle = 0x87AF000B + SQLITE_E_NOTFOUND Handle = 0x87AF000C + SQLITE_E_FULL Handle = 0x87AF000D + SQLITE_E_CANTOPEN Handle = 0x87AF000E + SQLITE_E_PROTOCOL Handle = 0x87AF000F + SQLITE_E_EMPTY Handle = 0x87AF0010 + SQLITE_E_SCHEMA Handle = 0x87AF0011 + SQLITE_E_TOOBIG Handle = 0x87AF0012 + SQLITE_E_CONSTRAINT Handle = 0x87AF0013 + SQLITE_E_MISMATCH Handle = 0x87AF0014 + SQLITE_E_MISUSE Handle = 0x87AF0015 + SQLITE_E_NOLFS Handle = 0x87AF0016 + SQLITE_E_AUTH Handle = 0x87AF0017 + SQLITE_E_FORMAT Handle = 0x87AF0018 + SQLITE_E_RANGE Handle = 0x87AF0019 + SQLITE_E_NOTADB Handle = 0x87AF001A + SQLITE_E_NOTICE Handle = 0x87AF001B + SQLITE_E_WARNING Handle = 0x87AF001C + SQLITE_E_ROW Handle = 0x87AF0064 + SQLITE_E_DONE Handle = 0x87AF0065 + SQLITE_E_IOERR_READ Handle = 0x87AF010A + SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A + SQLITE_E_IOERR_WRITE Handle = 0x87AF030A + SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A + SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A + SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A + SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A + SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A + SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A + SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A + SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A + SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A + SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A + SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A + SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A + SQLITE_E_IOERR_CLOSE Handle = 0x87AF100A + SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A + SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A + SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A + SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A + SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A + SQLITE_E_IOERR_SEEK Handle = 0x87AF160A + SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A + SQLITE_E_IOERR_MMAP Handle = 0x87AF180A + SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A + SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A + SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02 + SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03 + SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106 + SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105 + SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205 + SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E + SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E + SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E + SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E + SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B + SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108 + SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208 + SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308 + SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408 + SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204 + SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113 + SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213 + SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313 + SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413 + SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513 + SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613 + SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713 + SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813 + SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913 + 
SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13 + SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B + SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B + SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C + UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001 + UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002 + UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003 + UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004 + UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005 + UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006 + UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007 + UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008 + UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009 + UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A + UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B + UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C + UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D + UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E + UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F + UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010 + UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011 + UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012 + UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013 + UTC_E_SQM_INIT_FAILED Handle = 0x87C51014 + UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015 + UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016 + UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017 + UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018 + UTC_E_INVALID_FILTER Handle = 0x87C51019 + UTC_E_EXE_TERMINATED Handle = 0x87C5101A + UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B + UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C + UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D + UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E + UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F + UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020 + UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021 + UTC_E_TRIGGER_MISMATCH Handle = 0x87C51022 + UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023 + UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024 + UTC_E_DELAY_TERMINATED Handle = 0x87C51025 + UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026 + UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027 + UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028 + UTC_E_RPC_TIMEOUT Handle = 0x87C51029 + UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A + UTC_E_API_BUSY Handle = 0x87C5102B + UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C + UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D + UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E + UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F + UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030 + UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031 + UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032 + UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033 + UTC_E_BINARY_MISSING Handle = 0x87C51034 + UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035 + UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036 + UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037 + UTC_E_THROTTLED Handle = 0x87C51038 + UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039 + UTC_E_SCRIPT_MISSING Handle = 0x87C5103A + UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B + UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C + UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D + UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E + UTC_E_CERT_REV_FAILED Handle = 0x87C5103F + UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040 + UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 
0x87C51041 + UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042 + UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043 + UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044 + UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045 + UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046 + UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047 + UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048 + UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049 + UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050 + UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051 + UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052 + UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053 + UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054 + UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055 + UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056 + UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057 + UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058 + UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 0x87C51059 + UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A + UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B + WINML_ERR_INVALID_DEVICE Handle = 0x88900001 + WINML_ERR_INVALID_BINDING Handle = 0x88900002 + WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 + WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 +) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 00000000000..6048ac679fa --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. + +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + 
FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 
0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 
0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, 
[8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = 
&KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e4b54e2d929..d461bed98ac 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -38,14 +38,18 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modshell32 = NewLazySystemDLL("shell32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + modole32 = NewLazySystemDLL("ole32.dll") + modntdll = NewLazySystemDLL("ntdll.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") modnetapi32 = NewLazySystemDLL("netapi32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") + modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") @@ -57,6 +61,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procStartServiceW = modadvapi32.NewProc("StartServiceW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procControlService = modadvapi32.NewProc("ControlService") procStartServiceCtrlDispatcherW = 
modadvapi32.NewProc("StartServiceCtrlDispatcherW") procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") @@ -66,6 +71,7 @@ var ( procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") procGetLastError = modkernel32.NewProc("GetLastError") procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") @@ -74,9 +80,11 @@ var ( procGetVersion = modkernel32.NewProc("GetVersion") procFormatMessageW = modkernel32.NewProc("FormatMessageW") procExitProcess = modkernel32.NewProc("ExitProcess") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") procCreateFileW = modkernel32.NewProc("CreateFileW") procReadFile = modkernel32.NewProc("ReadFile") procWriteFile = modkernel32.NewProc("WriteFile") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procCloseHandle = modkernel32.NewProc("CloseHandle") procGetStdHandle = modkernel32.NewProc("GetStdHandle") @@ -85,6 +93,7 @@ var ( procFindNextFileW = modkernel32.NewProc("FindNextFileW") procFindClose = modkernel32.NewProc("FindClose") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") @@ -105,10 +114,13 @@ var ( procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCreateProcessW = modkernel32.NewProc("CreateProcessW") procOpenProcess = modkernel32.NewProc("OpenProcess") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procTerminateProcess = modkernel32.NewProc("TerminateProcess") procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") @@ -123,6 +135,9 @@ var ( procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") procSetFileTime = modkernel32.NewProc("SetFileTime") procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") @@ -170,6 +185,8 @@ var ( procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procProcess32FirstW = modkernel32.NewProc("Process32FirstW") procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = 
modkernel32.NewProc("Thread32Next") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") @@ -180,6 +197,18 @@ var ( procSetEvent = modkernel32.NewProc("SetEvent") procResetEvent = modkernel32.NewProc("ResetEvent") procPulseEvent = modkernel32.NewProc("PulseEvent") + procSleepEx = modkernel32.NewProc("SleepEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -199,6 +228,12 @@ var ( procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -246,12 +281,31 @@ var ( procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procCopySid = modadvapi32.NewProc("CopySid") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") procFreeSid = modadvapi32.NewProc("FreeSid") procEqualSid = modadvapi32.NewProc("EqualSid") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procIsValidSid = modadvapi32.NewProc("IsValidSid") procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procDuplicateTokenEx 
= modadvapi32.NewProc("DuplicateTokenEx") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") ) func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { @@ -378,6 +432,18 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { return } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { @@ -486,6 +552,14 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize return } +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -607,7 +681,19 @@ func ExitProcess(exitcode uint32) { return } -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) handle = Handle(r0) if handle == InvalidHandle { @@ -652,6 +738,24 @@ func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) return } +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), 
uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) newlowoffset = uint32(r0) @@ -751,6 +855,18 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e return } +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) n = uint32(r0) @@ -974,14 +1090,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } -func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { var _p0 uint32 if inheritHandle { _p0 = 1 } else { _p0 = 0 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { if e1 != 0 { @@ -993,6 +1109,26 @@ func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err return } +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func TerminateProcess(handle Handle, exitcode uint32) (err error) { r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) if r1 == 0 { @@ -1042,6 +1178,19 @@ func GetCurrentProcess() (pseudoHandle Handle, err error) { return } +func GetCurrentThread() (pseudoHandle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + pseudoHandle = Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { @@ -1228,6 +1377,42 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { return } +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := 
syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) if r1 == 0 { @@ -1670,7 +1855,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 return } -func getCurrentProcessId() (pid uint32) { +func GetCurrentProcessId() (pid uint32) { r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) pid = uint32(r0) return @@ -1773,6 +1958,30 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { return } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1896,6 +2105,156 @@ func PulseEvent(event Handle) (err error) { return } +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) + return +} + +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 
2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + if id == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2123,6 +2482,54 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), 
uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -2653,6 +3060,24 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s return } +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 + return +} + func FreeSid(sid *SID) (err error) { r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) if r1 != 0 { @@ -2671,6 +3096,30 @@ func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { return } +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) + return +} + +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 + return +} + func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { @@ -2683,8 +3132,26 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( return } -func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) +func OpenProcessToken(process 
Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -2695,8 +3162,116 @@ func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { return } -func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), 
uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -2718,3 +3293,45 @@ func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { } return } + +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/text/transform/transform.go b/cluster-autoscaler/vendor/golang.org/x/text/transform/transform.go index 919e3d950e9..520b9ada0e2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/text/transform/transform.go +++ b/cluster-autoscaler/vendor/golang.org/x/text/transform/transform.go @@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro return dstL.n, srcL.p, err } -// Deprecated: use runes.Remove instead. +// Deprecated: Use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } diff --git a/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 2e1ff19599d..d8c94e1bd1a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.10 +// +build go1.10,!go1.13 package bidi diff --git a/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go new file mode 100644 index 00000000000..022e3c69092 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -0,0 +1,1887 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16512 bytes (16.12 KiB). Checksum: 2a9cf1317f2ffaa. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 234 blocks, 14976 entries, 14976 bytes +// The third block is the zero block. 
+var bidiValues = [14976]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 
0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 
0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 
0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 
0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 
0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 
0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 
0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 
0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 
0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 
0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 
0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 
0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 
0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 
0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 
0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 
0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x000d, 0x2841: 0x000d, 0x2842: 0x000d, 0x2843: 0x000d, 0x2844: 0x000d, 0x2845: 0x000d, + 0x2846: 0x000d, 0x2847: 0x000d, 0x2848: 0x000d, 0x2849: 0x000d, 0x284a: 0x000d, 0x284b: 0x000d, + 0x284c: 0x000d, 0x284d: 0x000d, 0x284e: 0x000d, 0x284f: 0x000d, 0x2850: 0x000d, 0x2851: 0x000d, + 0x2852: 0x000d, 0x2853: 0x000d, 0x2854: 0x000d, 0x2855: 0x000d, 0x2856: 0x000d, 0x2857: 0x000d, + 0x2858: 0x000d, 0x2859: 0x000d, 0x285a: 0x000d, 0x285b: 0x000d, 0x285c: 0x000d, 0x285d: 0x000d, + 0x285e: 0x000d, 0x285f: 0x000d, 0x2860: 0x000d, 0x2861: 0x000d, 0x2862: 0x000d, 0x2863: 0x000d, + 0x2864: 0x000c, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x000c, 0x2868: 0x000d, 0x2869: 0x000d, + 0x286a: 0x000d, 0x286b: 0x000d, 0x286c: 0x000d, 0x286d: 0x000d, 0x286e: 0x000d, 0x286f: 0x000d, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x000d, 0x287b: 0x000d, + 0x287c: 0x000d, 0x287d: 0x000d, 0x287e: 0x000d, 0x287f: 0x000d, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 
0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x0001, 0x28c1: 0x0001, 0x28c2: 0x0001, 0x28c3: 0x0001, 0x28c4: 0x0001, 0x28c5: 0x0001, + 0x28c6: 0x0001, 0x28c7: 0x0001, 0x28c8: 0x0001, 0x28c9: 0x0001, 0x28ca: 0x0001, 0x28cb: 0x0001, + 0x28cc: 0x0001, 0x28cd: 0x0001, 0x28ce: 0x0001, 0x28cf: 0x0001, 0x28d0: 0x0001, 0x28d1: 0x0001, + 0x28d2: 0x0001, 0x28d3: 0x0001, 0x28d4: 0x0001, 0x28d5: 0x0001, 0x28d6: 0x0001, 0x28d7: 0x0001, + 0x28d8: 0x0001, 0x28d9: 0x0001, 0x28da: 0x0001, 0x28db: 0x0001, 0x28dc: 0x0001, 0x28dd: 0x0001, + 0x28de: 0x0001, 0x28df: 0x0001, 0x28e0: 0x0001, 0x28e1: 0x0001, 0x28e2: 0x0001, 0x28e3: 0x0001, + 0x28e4: 0x0001, 0x28e5: 0x0001, 0x28e6: 0x0001, 0x28e7: 0x0001, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x000d, 0x28f1: 0x000d, 0x28f2: 0x000d, 0x28f3: 0x000d, 0x28f4: 0x000d, 0x28f5: 0x000d, + 0x28f6: 0x000d, 0x28f7: 0x000d, 0x28f8: 0x000d, 0x28f9: 0x000d, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000d, 0x2901: 0x000d, 0x2902: 0x000d, 0x2903: 0x000d, 0x2904: 0x000d, 0x2905: 0x000d, + 0x2906: 0x000c, 0x2907: 0x000c, 0x2908: 0x000c, 0x2909: 0x000c, 0x290a: 0x000c, 0x290b: 0x000c, + 0x290c: 0x000c, 0x290d: 0x000c, 0x290e: 0x000c, 0x290f: 0x000c, 0x2910: 0x000c, 0x2911: 0x000d, + 0x2912: 0x000d, 0x2913: 0x000d, 0x2914: 0x000d, 0x2915: 0x000d, 0x2916: 0x000d, 0x2917: 0x000d, + 0x2918: 0x000d, 0x2919: 0x000d, 0x291a: 0x000d, 0x291b: 0x000d, 0x291c: 0x000d, 0x291d: 0x000d, + 0x291e: 0x000d, 0x291f: 0x000d, 0x2920: 0x000d, 0x2921: 0x000d, 0x2922: 0x000d, 0x2923: 0x000d, + 0x2924: 0x000d, 0x2925: 0x000d, 0x2926: 0x000d, 0x2927: 0x000d, 0x2928: 0x000d, 0x2929: 0x000d, + 0x292a: 0x000d, 0x292b: 0x000d, 0x292c: 0x000d, 0x292d: 0x000d, 0x292e: 0x000d, 0x292f: 0x000d, + 0x2930: 0x0001, 0x2931: 0x0001, 0x2932: 0x0001, 0x2933: 0x0001, 0x2934: 0x0001, 0x2935: 0x0001, + 0x2936: 0x0001, 0x2937: 0x0001, 0x2938: 0x0001, 0x2939: 0x0001, 0x293a: 0x0001, 0x293b: 0x0001, + 0x293c: 0x0001, 0x293d: 0x0001, 0x293e: 0x0001, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2941: 0x000c, + 0x2978: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, 0x297b: 0x000c, + 0x297c: 0x000c, 0x297d: 0x000c, 0x297e: 0x000c, 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x2983: 0x000c, 0x2984: 0x000c, 0x2985: 0x000c, + 0x2986: 0x000c, + 0x2992: 0x000a, 0x2993: 0x000a, 0x2994: 0x000a, 0x2995: 0x000a, 0x2996: 0x000a, 0x2997: 0x000a, + 0x2998: 0x000a, 0x2999: 0x000a, 
0x299a: 0x000a, 0x299b: 0x000a, 0x299c: 0x000a, 0x299d: 0x000a, + 0x299e: 0x000a, 0x299f: 0x000a, 0x29a0: 0x000a, 0x29a1: 0x000a, 0x29a2: 0x000a, 0x29a3: 0x000a, + 0x29a4: 0x000a, 0x29a5: 0x000a, + 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f3: 0x000c, 0x29f4: 0x000c, 0x29f5: 0x000c, + 0x29f6: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a02: 0x000c, + 0x2a27: 0x000c, 0x2a28: 0x000c, 0x2a29: 0x000c, + 0x2a2a: 0x000c, 0x2a2b: 0x000c, 0x2a2d: 0x000c, 0x2a2e: 0x000c, 0x2a2f: 0x000c, + 0x2a30: 0x000c, 0x2a31: 0x000c, 0x2a32: 0x000c, 0x2a33: 0x000c, 0x2a34: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a73: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac9: 0x000c, 0x2aca: 0x000c, 0x2acb: 0x000c, + 0x2acc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b2f: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b34: 0x000c, + 0x2b36: 0x000c, 0x2b37: 0x000c, + 0x2b3e: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b5f: 0x000c, 0x2b63: 0x000c, + 0x2b64: 0x000c, 0x2b65: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x000c, + 0x2ba6: 0x000c, 0x2ba7: 0x000c, 0x2ba8: 0x000c, 0x2ba9: 0x000c, + 0x2baa: 0x000c, 0x2bab: 0x000c, 0x2bac: 0x000c, + 0x2bb0: 0x000c, 0x2bb1: 0x000c, 0x2bb2: 0x000c, 0x2bb3: 0x000c, 0x2bb4: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c02: 0x000c, 0x2c03: 0x000c, 0x2c04: 0x000c, + 0x2c06: 0x000c, + 0x2c1e: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, 0x2c78: 0x000c, 0x2c7a: 0x000c, + 0x2c7f: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x000c, 0x2c82: 0x000c, 0x2c83: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cfc: 0x000c, 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d1c: 0x000c, 0x2d1d: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d76: 0x000c, 0x2d77: 0x000c, 0x2d78: 0x000c, 0x2d79: 0x000c, 0x2d7a: 0x000c, + 0x2d7d: 0x000c, 0x2d7f: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2d80: 0x000c, + 0x2da0: 0x000a, 0x2da1: 0x000a, 0x2da2: 0x000a, 0x2da3: 0x000a, + 0x2da4: 0x000a, 0x2da5: 0x000a, 0x2da6: 0x000a, 0x2da7: 0x000a, 0x2da8: 0x000a, 0x2da9: 0x000a, + 0x2daa: 0x000a, 0x2dab: 0x000a, 0x2dac: 0x000a, + // Block 0xb7, offset 0x2dc0 + 0x2deb: 0x000c, 0x2ded: 0x000c, + 0x2df0: 0x000c, 0x2df1: 0x000c, 0x2df2: 0x000c, 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df7: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e1d: 0x000c, + 0x2e1e: 0x000c, 0x2e1f: 0x000c, 0x2e22: 0x000c, 0x2e23: 0x000c, + 0x2e24: 0x000c, 0x2e25: 0x000c, 0x2e27: 0x000c, 0x2e28: 0x000c, 0x2e29: 0x000c, + 0x2e2a: 0x000c, 0x2e2b: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e71: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e74: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, 0x2e77: 0x000c, 0x2e79: 0x000c, 0x2e7a: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 
0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x314f: 0x000c, 0x3150: 0x000c, 0x3151: 0x000c, + 0x3152: 0x000c, + // Block 0xc6, offset 0x3180 + 0x319d: 0x000c, + 0x319e: 0x000c, 0x31a0: 0x000b, 0x31a1: 0x000b, 0x31a2: 0x000b, 0x31a3: 0x000b, + // Block 0xc7, offset 0x31c0 + 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, + 0x31f3: 0x000b, 0x31f4: 0x000b, 0x31f5: 0x000b, + 0x31f6: 0x000b, 0x31f7: 0x000b, 0x31f8: 0x000b, 0x31f9: 0x000b, 0x31fa: 0x000b, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x322d: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3240: 0x000a, 0x3241: 0x000a, 0x3242: 0x000c, 0x3243: 0x000c, 0x3244: 0x000c, 0x3245: 0x000a, + // Block 0xca, offset 0x3280 + 0x3280: 0x000a, 0x3281: 0x000a, 0x3282: 0x000a, 0x3283: 0x000a, 0x3284: 0x000a, 0x3285: 0x000a, + 0x3286: 0x000a, 0x3287: 0x000a, 0x3288: 0x000a, 0x3289: 0x000a, 0x328a: 0x000a, 0x328b: 0x000a, + 0x328c: 0x000a, 0x328d: 0x000a, 0x328e: 0x000a, 0x328f: 0x000a, 0x3290: 0x000a, 0x3291: 0x000a, + 0x3292: 0x000a, 
0x3293: 0x000a, 0x3294: 0x000a, 0x3295: 0x000a, 0x3296: 0x000a, + // Block 0xcb, offset 0x32c0 + 0x32db: 0x000a, + // Block 0xcc, offset 0x3300 + 0x3315: 0x000a, + // Block 0xcd, offset 0x3340 + 0x334f: 0x000a, + // Block 0xce, offset 0x3380 + 0x3389: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c3: 0x000a, + 0x33ce: 0x0002, 0x33cf: 0x0002, 0x33d0: 0x0002, 0x33d1: 0x0002, + 0x33d2: 0x0002, 0x33d3: 0x0002, 0x33d4: 0x0002, 0x33d5: 0x0002, 0x33d6: 0x0002, 0x33d7: 0x0002, + 0x33d8: 0x0002, 0x33d9: 0x0002, 0x33da: 0x0002, 0x33db: 0x0002, 0x33dc: 0x0002, 0x33dd: 0x0002, + 0x33de: 0x0002, 0x33df: 0x0002, 0x33e0: 0x0002, 0x33e1: 0x0002, 0x33e2: 0x0002, 0x33e3: 0x0002, + 0x33e4: 0x0002, 0x33e5: 0x0002, 0x33e6: 0x0002, 0x33e7: 0x0002, 0x33e8: 0x0002, 0x33e9: 0x0002, + 0x33ea: 0x0002, 0x33eb: 0x0002, 0x33ec: 0x0002, 0x33ed: 0x0002, 0x33ee: 0x0002, 0x33ef: 0x0002, + 0x33f0: 0x0002, 0x33f1: 0x0002, 0x33f2: 0x0002, 0x33f3: 0x0002, 0x33f4: 0x0002, 0x33f5: 0x0002, + 0x33f6: 0x0002, 0x33f7: 0x0002, 0x33f8: 0x0002, 0x33f9: 0x0002, 0x33fa: 0x0002, 0x33fb: 0x0002, + 0x33fc: 0x0002, 0x33fd: 0x0002, 0x33fe: 0x0002, 0x33ff: 0x0002, + // Block 0xd0, offset 0x3400 + 0x3400: 0x000c, 0x3401: 0x000c, 0x3402: 0x000c, 0x3403: 0x000c, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x000c, + 0x340c: 0x000c, 0x340d: 0x000c, 0x340e: 0x000c, 0x340f: 0x000c, 0x3410: 0x000c, 0x3411: 0x000c, + 0x3412: 0x000c, 0x3413: 0x000c, 0x3414: 0x000c, 0x3415: 0x000c, 0x3416: 0x000c, 0x3417: 0x000c, + 0x3418: 0x000c, 0x3419: 0x000c, 0x341a: 0x000c, 0x341b: 0x000c, 0x341c: 0x000c, 0x341d: 0x000c, + 0x341e: 0x000c, 0x341f: 0x000c, 0x3420: 0x000c, 0x3421: 0x000c, 0x3422: 0x000c, 0x3423: 0x000c, + 0x3424: 0x000c, 0x3425: 0x000c, 0x3426: 0x000c, 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x342a: 0x000c, 0x342b: 0x000c, 0x342c: 0x000c, 0x342d: 0x000c, 0x342e: 0x000c, 0x342f: 0x000c, + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3443: 0x000c, 0x3444: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x344c: 0x000c, 0x344d: 0x000c, 0x344e: 0x000c, 0x344f: 0x000c, 0x3450: 0x000c, 0x3451: 0x000c, + 0x3452: 0x000c, 0x3453: 0x000c, 0x3454: 0x000c, 0x3455: 0x000c, 0x3456: 0x000c, 0x3457: 0x000c, + 0x3458: 0x000c, 0x3459: 0x000c, 0x345a: 0x000c, 0x345b: 0x000c, 0x345c: 0x000c, 0x345d: 0x000c, + 0x345e: 0x000c, 0x345f: 0x000c, 0x3460: 0x000c, 0x3461: 0x000c, 0x3462: 0x000c, 0x3463: 0x000c, + 0x3464: 0x000c, 0x3465: 0x000c, 0x3466: 0x000c, 0x3467: 0x000c, 0x3468: 0x000c, 0x3469: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, + 0x3475: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3484: 0x000c, + 0x349b: 0x000c, 0x349c: 0x000c, 0x349d: 0x000c, + 0x349e: 0x000c, 0x349f: 0x000c, 0x34a1: 0x000c, 0x34a2: 0x000c, 0x34a3: 0x000c, + 0x34a4: 0x000c, 0x34a5: 0x000c, 0x34a6: 0x000c, 0x34a7: 0x000c, 0x34a8: 0x000c, 0x34a9: 0x000c, + 0x34aa: 0x000c, 0x34ab: 0x000c, 0x34ac: 0x000c, 0x34ad: 0x000c, 0x34ae: 0x000c, 0x34af: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 
0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x0001, 0x3501: 0x0001, 0x3502: 0x0001, 0x3503: 0x0001, 0x3504: 0x0001, 0x3505: 0x0001, + 0x3506: 0x0001, 0x3507: 0x0001, 0x3508: 0x0001, 0x3509: 0x0001, 0x350a: 0x0001, 0x350b: 0x0001, + 0x350c: 0x0001, 0x350d: 0x0001, 0x350e: 0x0001, 0x350f: 0x0001, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x0001, + 0x3518: 0x0001, 0x3519: 0x0001, 0x351a: 0x0001, 0x351b: 0x0001, 0x351c: 0x0001, 0x351d: 0x0001, + 0x351e: 0x0001, 0x351f: 0x0001, 0x3520: 0x0001, 0x3521: 0x0001, 0x3522: 0x0001, 0x3523: 0x0001, + 0x3524: 0x0001, 0x3525: 0x0001, 0x3526: 0x0001, 0x3527: 0x0001, 0x3528: 0x0001, 0x3529: 0x0001, + 0x352a: 0x0001, 0x352b: 0x0001, 0x352c: 0x0001, 0x352d: 0x0001, 0x352e: 0x0001, 0x352f: 0x0001, + 0x3530: 0x0001, 0x3531: 0x0001, 0x3532: 0x0001, 0x3533: 0x0001, 0x3534: 0x0001, 0x3535: 0x0001, + 0x3536: 0x0001, 0x3537: 0x0001, 0x3538: 0x0001, 0x3539: 0x0001, 0x353a: 0x0001, 0x353b: 0x0001, + 0x353c: 0x0001, 0x353d: 0x0001, 0x353e: 0x0001, 0x353f: 0x0001, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0001, 0x3541: 0x0001, 0x3542: 0x0001, 0x3543: 0x0001, 0x3544: 0x000c, 0x3545: 0x000c, + 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x0001, + 0x354c: 0x0001, 0x354d: 0x0001, 0x354e: 0x0001, 0x354f: 0x0001, 0x3550: 0x0001, 0x3551: 0x0001, + 0x3552: 0x0001, 0x3553: 0x0001, 0x3554: 0x0001, 0x3555: 0x0001, 0x3556: 0x0001, 0x3557: 0x0001, + 0x3558: 0x0001, 0x3559: 0x0001, 0x355a: 0x0001, 0x355b: 0x0001, 0x355c: 0x0001, 0x355d: 0x0001, + 0x355e: 0x0001, 0x355f: 0x0001, 0x3560: 0x0001, 0x3561: 0x0001, 0x3562: 0x0001, 0x3563: 0x0001, + 0x3564: 0x0001, 0x3565: 0x0001, 0x3566: 0x0001, 0x3567: 0x0001, 0x3568: 0x0001, 0x3569: 0x0001, + 0x356a: 0x0001, 0x356b: 0x0001, 0x356c: 0x0001, 0x356d: 0x0001, 0x356e: 0x0001, 0x356f: 0x0001, + 0x3570: 0x0001, 0x3571: 0x0001, 0x3572: 0x0001, 0x3573: 0x0001, 0x3574: 0x0001, 0x3575: 0x0001, + 0x3576: 0x0001, 0x3577: 0x0001, 0x3578: 0x0001, 0x3579: 0x0001, 0x357a: 0x0001, 0x357b: 0x0001, + 0x357c: 0x0001, 0x357d: 0x0001, 0x357e: 0x0001, 0x357f: 0x0001, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000d, 0x3581: 0x000d, 0x3582: 0x000d, 0x3583: 0x000d, 0x3584: 0x000d, 0x3585: 0x000d, + 0x3586: 0x000d, 0x3587: 0x000d, 0x3588: 0x000d, 0x3589: 0x000d, 0x358a: 0x000d, 0x358b: 0x000d, + 0x358c: 0x000d, 0x358d: 0x000d, 0x358e: 0x000d, 0x358f: 0x000d, 0x3590: 0x000d, 0x3591: 0x000d, + 0x3592: 0x000d, 0x3593: 0x000d, 0x3594: 0x000d, 0x3595: 0x000d, 0x3596: 0x000d, 0x3597: 0x000d, + 0x3598: 0x000d, 0x3599: 0x000d, 0x359a: 0x000d, 0x359b: 0x000d, 0x359c: 0x000d, 0x359d: 0x000d, + 0x359e: 0x000d, 0x359f: 0x000d, 0x35a0: 0x000d, 0x35a1: 0x000d, 0x35a2: 0x000d, 0x35a3: 0x000d, + 0x35a4: 0x000d, 0x35a5: 0x000d, 0x35a6: 0x000d, 0x35a7: 0x000d, 0x35a8: 0x000d, 0x35a9: 0x000d, + 0x35aa: 0x000d, 0x35ab: 0x000d, 0x35ac: 0x000d, 0x35ad: 0x000d, 0x35ae: 0x000d, 0x35af: 0x000d, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000d, 0x35b3: 0x000d, 0x35b4: 0x000d, 0x35b5: 0x000d, + 0x35b6: 0x000d, 0x35b7: 0x000d, 0x35b8: 0x000d, 0x35b9: 0x000d, 0x35ba: 0x000d, 
0x35bb: 0x000d, + 0x35bc: 0x000d, 0x35bd: 0x000d, 0x35be: 0x000d, 0x35bf: 0x000d, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35c8: 0x000a, 0x35c9: 0x000a, 0x35ca: 0x000a, 0x35cb: 0x000a, + 0x35cc: 0x000a, 0x35cd: 0x000a, 0x35ce: 0x000a, 0x35cf: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, + 0x35f0: 0x000a, 0x35f1: 0x000a, 0x35f2: 0x000a, 0x35f3: 0x000a, 0x35f4: 0x000a, 0x35f5: 0x000a, + 0x35f6: 0x000a, 0x35f7: 0x000a, 0x35f8: 0x000a, 0x35f9: 0x000a, 0x35fa: 0x000a, 0x35fb: 0x000a, + 0x35fc: 0x000a, 0x35fd: 0x000a, 0x35fe: 0x000a, 0x35ff: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, 0x362d: 0x000a, 0x362e: 0x000a, + 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, 0x363f: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, 0x365f: 0x000a, 0x3660: 0x000a, 0x3661: 0x000a, 0x3662: 0x000a, 0x3663: 0x000a, + 0x3664: 0x000a, 0x3665: 0x000a, 0x3666: 0x000a, 0x3667: 0x000a, 0x3668: 0x000a, 0x3669: 0x000a, + 0x366a: 0x000a, 0x366b: 0x000a, 0x366c: 0x000a, 0x366d: 0x000a, 0x366e: 0x000a, 0x366f: 0x000a, + 0x3670: 0x000a, 0x3671: 0x000a, 0x3672: 0x000a, 0x3673: 0x000a, 0x3674: 0x000a, 0x3675: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x0002, 0x3681: 0x0002, 0x3682: 0x0002, 0x3683: 0x0002, 0x3684: 0x0002, 0x3685: 0x0002, + 0x3686: 0x0002, 0x3687: 0x0002, 0x3688: 0x0002, 0x3689: 0x0002, 0x368a: 0x0002, 0x368b: 0x000a, + 0x368c: 0x000a, + 0x36af: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36ea: 0x000a, 0x36eb: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 
0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + 0x37d8: 0x000a, 0x37d9: 0x000a, 0x37da: 0x000a, 0x37db: 0x000a, 0x37dc: 0x000a, 0x37dd: 0x000a, + 0x37de: 0x000a, 0x37df: 0x000a, 0x37e0: 0x000a, 0x37e1: 0x000a, 0x37e2: 0x000a, 0x37e3: 0x000a, + 0x37e4: 0x000a, 0x37e5: 0x000a, 0x37e6: 0x000a, 0x37e7: 0x000a, 0x37e8: 0x000a, 0x37e9: 0x000a, + 0x37ea: 0x000a, 0x37eb: 0x000a, 0x37ec: 0x000a, 0x37ed: 0x000a, 0x37ee: 0x000a, 0x37ef: 0x000a, + 0x37f0: 0x000a, 0x37f1: 0x000a, 0x37f2: 0x000a, 0x37f3: 0x000a, 0x37f4: 0x000a, 0x37f5: 0x000a, + 0x37f6: 0x000a, 0x37f7: 0x000a, 0x37f8: 0x000a, 0x37f9: 0x000a, 0x37fa: 0x000a, 0x37fb: 0x000a, + 0x37fc: 0x000a, 0x37fd: 0x000a, 0x37fe: 0x000a, 0x37ff: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, 0x3801: 0x000a, 0x3802: 0x000a, 0x3803: 0x000a, 0x3804: 0x000a, 0x3805: 0x000a, + 0x3806: 0x000a, 0x3807: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, + 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, 0x3827: 0x000a, 0x3828: 0x000a, 0x3829: 0x000a, + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, 0x382d: 0x000a, 0x382e: 0x000a, 0x382f: 0x000a, + 0x3830: 0x000a, 0x3831: 0x000a, 0x3832: 0x000a, 0x3833: 0x000a, 0x3834: 0x000a, 0x3835: 0x000a, + 0x3836: 0x000a, 0x3837: 0x000a, 0x3838: 0x000a, 0x3839: 0x000a, 0x383a: 0x000a, 0x383b: 0x000a, + 0x383c: 0x000a, 0x383d: 0x000a, 0x383e: 0x000a, 0x383f: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000a, 0x3841: 0x000a, 0x3842: 0x000a, 0x3843: 0x000a, 0x3844: 0x000a, 0x3845: 0x000a, + 0x3846: 0x000a, 0x3847: 0x000a, + 0x3850: 0x000a, 0x3851: 0x000a, + 0x3852: 0x000a, 0x3853: 0x000a, 0x3854: 0x000a, 0x3855: 0x000a, 0x3856: 0x000a, 0x3857: 0x000a, + 0x3858: 0x000a, 0x3859: 0x000a, 0x385a: 0x000a, 0x385b: 0x000a, 0x385c: 0x000a, 0x385d: 0x000a, + 0x385e: 0x000a, 0x385f: 0x000a, 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, 0x3866: 0x000a, 0x3867: 0x000a, 0x3868: 0x000a, 0x3869: 0x000a, + 0x386a: 0x000a, 0x386b: 0x000a, 0x386c: 0x000a, 0x386d: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 
0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, 0x38ad: 0x000a, 0x38ae: 0x000a, 0x38af: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, 0x38d9: 0x000a, 0x38da: 0x000a, 0x38db: 0x000a, 0x38dc: 0x000a, 0x38dd: 0x000a, + 0x38de: 0x000a, 0x38df: 0x000a, 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, 0x38ef: 0x000a, + 0x38f0: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38fa: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, 0x395a: 0x000a, 0x395b: 0x000a, 0x395c: 0x000a, 0x395d: 0x000a, + 0x395e: 0x000a, 0x395f: 0x000a, 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39a0: 
0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39fe: 0x000b, 0x39ff: 0x000b, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000b, 0x3a01: 0x000b, 0x3a02: 0x000b, 0x3a03: 0x000b, 0x3a04: 0x000b, 0x3a05: 0x000b, + 0x3a06: 0x000b, 0x3a07: 0x000b, 0x3a08: 0x000b, 0x3a09: 0x000b, 0x3a0a: 0x000b, 0x3a0b: 0x000b, + 0x3a0c: 0x000b, 0x3a0d: 0x000b, 0x3a0e: 0x000b, 0x3a0f: 0x000b, 0x3a10: 0x000b, 0x3a11: 0x000b, + 0x3a12: 0x000b, 0x3a13: 0x000b, 0x3a14: 0x000b, 0x3a15: 0x000b, 0x3a16: 0x000b, 0x3a17: 0x000b, + 0x3a18: 0x000b, 0x3a19: 0x000b, 0x3a1a: 0x000b, 0x3a1b: 0x000b, 0x3a1c: 0x000b, 0x3a1d: 0x000b, + 0x3a1e: 0x000b, 0x3a1f: 0x000b, 0x3a20: 0x000b, 0x3a21: 0x000b, 0x3a22: 0x000b, 0x3a23: 0x000b, + 0x3a24: 0x000b, 0x3a25: 0x000b, 0x3a26: 0x000b, 0x3a27: 0x000b, 0x3a28: 0x000b, 0x3a29: 0x000b, + 0x3a2a: 0x000b, 0x3a2b: 0x000b, 0x3a2c: 0x000b, 0x3a2d: 0x000b, 0x3a2e: 0x000b, 0x3a2f: 0x000b, + 0x3a30: 0x000b, 0x3a31: 0x000b, 0x3a32: 0x000b, 0x3a33: 0x000b, 0x3a34: 0x000b, 0x3a35: 0x000b, + 0x3a36: 0x000b, 0x3a37: 0x000b, 0x3a38: 0x000b, 0x3a39: 0x000b, 0x3a3a: 0x000b, 0x3a3b: 0x000b, + 0x3a3c: 0x000b, 0x3a3d: 0x000b, 0x3a3e: 0x000b, 0x3a3f: 0x000b, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000c, 0x3a41: 0x000c, 0x3a42: 0x000c, 0x3a43: 0x000c, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x000c, + 0x3a4c: 0x000c, 0x3a4d: 0x000c, 0x3a4e: 0x000c, 0x3a4f: 0x000c, 0x3a50: 0x000c, 0x3a51: 0x000c, + 0x3a52: 0x000c, 0x3a53: 0x000c, 0x3a54: 0x000c, 0x3a55: 0x000c, 0x3a56: 0x000c, 0x3a57: 0x000c, + 0x3a58: 0x000c, 0x3a59: 0x000c, 0x3a5a: 0x000c, 0x3a5b: 0x000c, 0x3a5c: 0x000c, 0x3a5d: 0x000c, + 0x3a5e: 0x000c, 0x3a5f: 0x000c, 0x3a60: 0x000c, 0x3a61: 0x000c, 0x3a62: 0x000c, 0x3a63: 0x000c, + 0x3a64: 0x000c, 0x3a65: 0x000c, 0x3a66: 0x000c, 0x3a67: 0x000c, 0x3a68: 0x000c, 0x3a69: 0x000c, + 0x3a6a: 0x000c, 0x3a6b: 0x000c, 0x3a6c: 0x000c, 0x3a6d: 0x000c, 0x3a6e: 0x000c, 0x3a6f: 0x000c, + 0x3a70: 0x000b, 0x3a71: 0x000b, 0x3a72: 0x000b, 0x3a73: 0x000b, 0x3a74: 0x000b, 0x3a75: 0x000b, + 0x3a76: 0x000b, 0x3a77: 0x000b, 0x3a78: 0x000b, 0x3a79: 0x000b, 0x3a7a: 0x000b, 0x3a7b: 0x000b, + 0x3a7c: 0x000b, 0x3a7d: 0x000b, 0x3a7e: 0x000b, 0x3a7f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9f, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0xa0, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0xa1, 0x2fd: 0xa2, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa3, 0x301: 0xa4, 0x302: 0xa5, 0x304: 0xa6, 0x305: 0xa7, 
0x306: 0xa8, 0x307: 0xa9, + 0x308: 0xaa, 0x30b: 0xab, 0x30c: 0x26, 0x30d: 0xac, + 0x310: 0xad, 0x311: 0xae, 0x312: 0xaf, 0x313: 0xb0, 0x316: 0xb1, 0x317: 0xb2, + 0x318: 0xb3, 0x319: 0xb4, 0x31a: 0xb5, 0x31c: 0xb6, + 0x320: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, + // Block 0xd, offset 0x340 + 0x36b: 0xc1, 0x36c: 0xc2, + 0x37e: 0xc3, + // Block 0xe, offset 0x380 + 0x3b2: 0xc4, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc5, 0x3c6: 0xc6, + 0x3c8: 0x54, 0x3c9: 0xc7, 0x3cc: 0x54, 0x3cd: 0xc8, + 0x3db: 0xc9, 0x3dc: 0xca, 0x3dd: 0xcb, 0x3de: 0xcc, 0x3df: 0xcd, + 0x3e8: 0xce, 0x3e9: 0xcf, 0x3ea: 0xd0, + // Block 0x10, offset 0x400 + 0x400: 0xd1, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xd2, 0x424: 0x9a, 0x425: 0xd3, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0xa1, 0x432: 0x0e, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd4, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd5, 0x441: 0x54, 0x442: 0xd6, 0x443: 0xd7, 0x444: 0xd8, 0x445: 0xd9, + 0x449: 0xda, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xdb, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xdc, + 0x460: 0xdd, 0x461: 0xde, 0x462: 0xdf, 0x464: 0xe0, 0x465: 0xe1, 0x466: 0xe2, 0x467: 0xe3, + 0x469: 0xe4, + 0x47f: 0xe5, + // Block 0x12, offset 0x480 + 0x4bf: 0xe5, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe6, 0x541: 0xe6, 0x542: 0xe6, 0x543: 0xe6, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe7, + 0x548: 0xe6, 0x549: 0xe6, 0x54a: 0xe6, 0x54b: 0xe6, 0x54c: 0xe6, 0x54d: 0xe6, 0x54e: 0xe6, 0x54f: 0xe6, + 0x550: 0xe6, 0x551: 0xe6, 0x552: 0xe6, 0x553: 0xe6, 0x554: 0xe6, 0x555: 0xe6, 0x556: 0xe6, 0x557: 0xe6, + 0x558: 0xe6, 0x559: 0xe6, 0x55a: 0xe6, 0x55b: 0xe6, 0x55c: 0xe6, 0x55d: 0xe6, 0x55e: 0xe6, 0x55f: 0xe6, + 0x560: 0xe6, 0x561: 0xe6, 0x562: 0xe6, 0x563: 0xe6, 0x564: 0xe6, 0x565: 0xe6, 0x566: 0xe6, 0x567: 0xe6, + 0x568: 0xe6, 0x569: 0xe6, 0x56a: 0xe6, 0x56b: 0xe6, 0x56c: 0xe6, 0x56d: 0xe6, 0x56e: 0xe6, 0x56f: 0xe6, + 0x570: 0xe6, 0x571: 0xe6, 0x572: 0xe6, 0x573: 0xe6, 0x574: 0xe6, 0x575: 0xe6, 0x576: 0xe6, 0x577: 0xe6, + 0x578: 0xe6, 0x579: 0xe6, 0x57a: 0xe6, 0x57b: 0xe6, 0x57c: 0xe6, 0x57d: 0xe6, 0x57e: 0xe6, 0x57f: 0xe6, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16568 bytes (16KiB); checksum: F50EF68C diff --git a/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index c48a97b0c28..26fbd55a124 100644 --- a/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.10 +// +build go1.10,!go1.13 package norm diff --git a/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go new file mode 100644 index 00000000000..7297cce32b7 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -0,0 +1,7693 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "11.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 
0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 
0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 
0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 
0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 
0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 
0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 
0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 
0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 
0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 
0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 
0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, + 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 
0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 
0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 
0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 
0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 
0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 
0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 
0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 
0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 
0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 
0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 
0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 
0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 
0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 
0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 
0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 
0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 
0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 
+ 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 
0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 
0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 
0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 
0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, 
+ 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 
0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. 
+func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10586 bytes (10.34 KiB). Checksum: dd926e82067bee11. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 
0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 
0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 
0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f97, 0x481: 0x32a3, 0x482: 0x2fa1, 0x483: 0x32ad, 0x484: 0x2fa6, 0x485: 0x32b2, + 0x486: 0x2fab, 0x487: 0x32b7, 0x488: 0x38cc, 0x489: 0x3a5b, 0x48a: 0x2fc4, 0x48b: 0x32d0, + 0x48c: 0x2fce, 0x48d: 0x32da, 0x48e: 0x2fdd, 0x48f: 0x32e9, 0x490: 0x2fd3, 0x491: 0x32df, + 0x492: 0x2fd8, 0x493: 0x32e4, 0x494: 0x38ef, 0x495: 0x3a7e, 0x496: 0x38f6, 0x497: 0x3a85, + 0x498: 0x3019, 0x499: 0x3325, 0x49a: 0x301e, 0x49b: 0x332a, 0x49c: 0x3904, 0x49d: 0x3a93, + 0x49e: 0x3023, 0x49f: 0x332f, 0x4a0: 0x3032, 0x4a1: 0x333e, 0x4a2: 0x3050, 0x4a3: 0x335c, + 0x4a4: 0x305f, 0x4a5: 0x336b, 0x4a6: 0x3055, 0x4a7: 0x3361, 0x4a8: 0x3064, 0x4a9: 0x3370, + 0x4aa: 0x3069, 0x4ab: 0x3375, 0x4ac: 0x30af, 0x4ad: 0x33bb, 0x4ae: 0x390b, 0x4af: 0x3a9a, + 0x4b0: 0x30b9, 0x4b1: 0x33ca, 0x4b2: 0x30c3, 0x4b3: 0x33d4, 0x4b4: 0x30cd, 0x4b5: 0x33de, + 0x4b6: 0x46c4, 0x4b7: 0x4755, 0x4b8: 0x3912, 0x4b9: 0x3aa1, 0x4ba: 0x30e6, 0x4bb: 0x33f7, + 0x4bc: 0x30e1, 0x4bd: 0x33f2, 0x4be: 0x30eb, 0x4bf: 0x33fc, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f0, 0x4c1: 0x3401, 0x4c2: 0x30f5, 0x4c3: 0x3406, 0x4c4: 0x3109, 0x4c5: 0x341a, + 0x4c6: 0x3113, 0x4c7: 0x3424, 0x4c8: 0x3122, 0x4c9: 0x3433, 0x4ca: 0x311d, 0x4cb: 0x342e, + 0x4cc: 0x3935, 0x4cd: 0x3ac4, 0x4ce: 0x3943, 0x4cf: 
0x3ad2, 0x4d0: 0x394a, 0x4d1: 0x3ad9, + 0x4d2: 0x3951, 0x4d3: 0x3ae0, 0x4d4: 0x314f, 0x4d5: 0x3460, 0x4d6: 0x3154, 0x4d7: 0x3465, + 0x4d8: 0x315e, 0x4d9: 0x346f, 0x4da: 0x46f1, 0x4db: 0x4782, 0x4dc: 0x3997, 0x4dd: 0x3b26, + 0x4de: 0x3177, 0x4df: 0x3488, 0x4e0: 0x3181, 0x4e1: 0x3492, 0x4e2: 0x4700, 0x4e3: 0x4791, + 0x4e4: 0x399e, 0x4e5: 0x3b2d, 0x4e6: 0x39a5, 0x4e7: 0x3b34, 0x4e8: 0x39ac, 0x4e9: 0x3b3b, + 0x4ea: 0x3190, 0x4eb: 0x34a1, 0x4ec: 0x319a, 0x4ed: 0x34b0, 0x4ee: 0x31ae, 0x4ef: 0x34c4, + 0x4f0: 0x31a9, 0x4f1: 0x34bf, 0x4f2: 0x31ea, 0x4f3: 0x3500, 0x4f4: 0x31f9, 0x4f5: 0x350f, + 0x4f6: 0x31f4, 0x4f7: 0x350a, 0x4f8: 0x39b3, 0x4f9: 0x3b42, 0x4fa: 0x39ba, 0x4fb: 0x3b49, + 0x4fc: 0x31fe, 0x4fd: 0x3514, 0x4fe: 0x3203, 0x4ff: 0x3519, + // Block 0x14, offset 0x500 + 0x500: 0x3208, 0x501: 0x351e, 0x502: 0x320d, 0x503: 0x3523, 0x504: 0x321c, 0x505: 0x3532, + 0x506: 0x3217, 0x507: 0x352d, 0x508: 0x3221, 0x509: 0x353c, 0x50a: 0x3226, 0x50b: 0x3541, + 0x50c: 0x322b, 0x50d: 0x3546, 0x50e: 0x3249, 0x50f: 0x3564, 0x510: 0x3262, 0x511: 0x3582, + 0x512: 0x3271, 0x513: 0x3591, 0x514: 0x3276, 0x515: 0x3596, 0x516: 0x337a, 0x517: 0x34a6, + 0x518: 0x3537, 0x519: 0x3573, 0x51b: 0x35d1, + 0x520: 0x46a1, 0x521: 0x4732, 0x522: 0x2f83, 0x523: 0x328f, + 0x524: 0x3878, 0x525: 0x3a07, 0x526: 0x3871, 0x527: 0x3a00, 0x528: 0x3886, 0x529: 0x3a15, + 0x52a: 0x387f, 0x52b: 0x3a0e, 0x52c: 0x38be, 0x52d: 0x3a4d, 0x52e: 0x3894, 0x52f: 0x3a23, + 0x530: 0x388d, 0x531: 0x3a1c, 0x532: 0x38a2, 0x533: 0x3a31, 0x534: 0x389b, 0x535: 0x3a2a, + 0x536: 0x38c5, 0x537: 0x3a54, 0x538: 0x46b5, 0x539: 0x4746, 0x53a: 0x3000, 0x53b: 0x330c, + 0x53c: 0x2fec, 0x53d: 0x32f8, 0x53e: 0x38da, 0x53f: 0x3a69, + // Block 0x15, offset 0x540 + 0x540: 0x38d3, 0x541: 0x3a62, 0x542: 0x38e8, 0x543: 0x3a77, 0x544: 0x38e1, 0x545: 0x3a70, + 0x546: 0x38fd, 0x547: 0x3a8c, 0x548: 0x3091, 0x549: 0x339d, 0x54a: 0x30a5, 0x54b: 0x33b1, + 0x54c: 0x46e7, 0x54d: 0x4778, 0x54e: 0x3136, 0x54f: 0x3447, 0x550: 0x3920, 0x551: 0x3aaf, + 0x552: 0x3919, 0x553: 0x3aa8, 0x554: 0x392e, 0x555: 0x3abd, 0x556: 0x3927, 0x557: 0x3ab6, + 0x558: 0x3989, 0x559: 0x3b18, 0x55a: 0x396d, 0x55b: 0x3afc, 0x55c: 0x3966, 0x55d: 0x3af5, + 0x55e: 0x397b, 0x55f: 0x3b0a, 0x560: 0x3974, 0x561: 0x3b03, 0x562: 0x3982, 0x563: 0x3b11, + 0x564: 0x31e5, 0x565: 0x34fb, 0x566: 0x31c7, 0x567: 0x34dd, 0x568: 0x39e4, 0x569: 0x3b73, + 0x56a: 0x39dd, 0x56b: 0x3b6c, 0x56c: 0x39f2, 0x56d: 0x3b81, 0x56e: 0x39eb, 0x56f: 0x3b7a, + 0x570: 0x39f9, 0x571: 0x3b88, 0x572: 0x3230, 0x573: 0x354b, 0x574: 0x3258, 0x575: 0x3578, + 0x576: 0x3253, 0x577: 0x356e, 0x578: 0x323f, 0x579: 0x355a, + // Block 0x16, offset 0x580 + 0x580: 0x4804, 0x581: 0x480a, 0x582: 0x491e, 0x583: 0x4936, 0x584: 0x4926, 0x585: 0x493e, + 0x586: 0x492e, 0x587: 0x4946, 0x588: 0x47aa, 0x589: 0x47b0, 0x58a: 0x488e, 0x58b: 0x48a6, + 0x58c: 0x4896, 0x58d: 0x48ae, 0x58e: 0x489e, 0x58f: 0x48b6, 0x590: 0x4816, 0x591: 0x481c, + 0x592: 0x3db8, 0x593: 0x3dc8, 0x594: 0x3dc0, 0x595: 0x3dd0, + 0x598: 0x47b6, 0x599: 0x47bc, 0x59a: 0x3ce8, 0x59b: 0x3cf8, 0x59c: 0x3cf0, 0x59d: 0x3d00, + 0x5a0: 0x482e, 0x5a1: 0x4834, 0x5a2: 0x494e, 0x5a3: 0x4966, + 0x5a4: 0x4956, 0x5a5: 0x496e, 0x5a6: 0x495e, 0x5a7: 0x4976, 0x5a8: 0x47c2, 0x5a9: 0x47c8, + 0x5aa: 0x48be, 0x5ab: 0x48d6, 0x5ac: 0x48c6, 0x5ad: 0x48de, 0x5ae: 0x48ce, 0x5af: 0x48e6, + 0x5b0: 0x4846, 0x5b1: 0x484c, 0x5b2: 0x3e18, 0x5b3: 0x3e30, 0x5b4: 0x3e20, 0x5b5: 0x3e38, + 0x5b6: 0x3e28, 0x5b7: 0x3e40, 0x5b8: 0x47ce, 0x5b9: 0x47d4, 0x5ba: 0x3d18, 0x5bb: 0x3d30, + 0x5bc: 0x3d20, 0x5bd: 0x3d38, 0x5be: 0x3d28, 0x5bf: 0x3d40, + 
// Block 0x17, offset 0x5c0 + 0x5c0: 0x4852, 0x5c1: 0x4858, 0x5c2: 0x3e48, 0x5c3: 0x3e58, 0x5c4: 0x3e50, 0x5c5: 0x3e60, + 0x5c8: 0x47da, 0x5c9: 0x47e0, 0x5ca: 0x3d48, 0x5cb: 0x3d58, + 0x5cc: 0x3d50, 0x5cd: 0x3d60, 0x5d0: 0x4864, 0x5d1: 0x486a, + 0x5d2: 0x3e80, 0x5d3: 0x3e98, 0x5d4: 0x3e88, 0x5d5: 0x3ea0, 0x5d6: 0x3e90, 0x5d7: 0x3ea8, + 0x5d9: 0x47e6, 0x5db: 0x3d68, 0x5dd: 0x3d70, + 0x5df: 0x3d78, 0x5e0: 0x487c, 0x5e1: 0x4882, 0x5e2: 0x497e, 0x5e3: 0x4996, + 0x5e4: 0x4986, 0x5e5: 0x499e, 0x5e6: 0x498e, 0x5e7: 0x49a6, 0x5e8: 0x47ec, 0x5e9: 0x47f2, + 0x5ea: 0x48ee, 0x5eb: 0x4906, 0x5ec: 0x48f6, 0x5ed: 0x490e, 0x5ee: 0x48fe, 0x5ef: 0x4916, + 0x5f0: 0x47f8, 0x5f1: 0x431e, 0x5f2: 0x3691, 0x5f3: 0x4324, 0x5f4: 0x4822, 0x5f5: 0x432a, + 0x5f6: 0x36a3, 0x5f7: 0x4330, 0x5f8: 0x36c1, 0x5f9: 0x4336, 0x5fa: 0x36d9, 0x5fb: 0x433c, + 0x5fc: 0x4870, 0x5fd: 0x4342, + // Block 0x18, offset 0x600 + 0x600: 0x3da0, 0x601: 0x3da8, 0x602: 0x4184, 0x603: 0x41a2, 0x604: 0x418e, 0x605: 0x41ac, + 0x606: 0x4198, 0x607: 0x41b6, 0x608: 0x3cd8, 0x609: 0x3ce0, 0x60a: 0x40d0, 0x60b: 0x40ee, + 0x60c: 0x40da, 0x60d: 0x40f8, 0x60e: 0x40e4, 0x60f: 0x4102, 0x610: 0x3de8, 0x611: 0x3df0, + 0x612: 0x41c0, 0x613: 0x41de, 0x614: 0x41ca, 0x615: 0x41e8, 0x616: 0x41d4, 0x617: 0x41f2, + 0x618: 0x3d08, 0x619: 0x3d10, 0x61a: 0x410c, 0x61b: 0x412a, 0x61c: 0x4116, 0x61d: 0x4134, + 0x61e: 0x4120, 0x61f: 0x413e, 0x620: 0x3ec0, 0x621: 0x3ec8, 0x622: 0x41fc, 0x623: 0x421a, + 0x624: 0x4206, 0x625: 0x4224, 0x626: 0x4210, 0x627: 0x422e, 0x628: 0x3d80, 0x629: 0x3d88, + 0x62a: 0x4148, 0x62b: 0x4166, 0x62c: 0x4152, 0x62d: 0x4170, 0x62e: 0x415c, 0x62f: 0x417a, + 0x630: 0x3685, 0x631: 0x367f, 0x632: 0x3d90, 0x633: 0x368b, 0x634: 0x3d98, + 0x636: 0x4810, 0x637: 0x3db0, 0x638: 0x35f5, 0x639: 0x35ef, 0x63a: 0x35e3, 0x63b: 0x42ee, + 0x63c: 0x35fb, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35a7, 0x642: 0x3dd8, 0x643: 0x369d, 0x644: 0x3de0, + 0x646: 0x483a, 0x647: 0x3df8, 0x648: 0x3601, 0x649: 0x42f4, 0x64a: 0x360d, 0x64b: 0x42fa, + 0x64c: 0x3619, 0x64d: 0x3b8f, 0x64e: 0x3b96, 0x64f: 0x3b9d, 0x650: 0x36b5, 0x651: 0x36af, + 0x652: 0x3e00, 0x653: 0x44e4, 0x656: 0x36bb, 0x657: 0x3e10, + 0x658: 0x3631, 0x659: 0x362b, 0x65a: 0x361f, 0x65b: 0x4300, 0x65d: 0x3ba4, + 0x65e: 0x3bab, 0x65f: 0x3bb2, 0x660: 0x36eb, 0x661: 0x36e5, 0x662: 0x3e68, 0x663: 0x44ec, + 0x664: 0x36cd, 0x665: 0x36d3, 0x666: 0x36f1, 0x667: 0x3e78, 0x668: 0x3661, 0x669: 0x365b, + 0x66a: 0x364f, 0x66b: 0x430c, 0x66c: 0x3649, 0x66d: 0x359b, 0x66e: 0x42e8, 0x66f: 0x0081, + 0x672: 0x3eb0, 0x673: 0x36f7, 0x674: 0x3eb8, + 0x676: 0x4888, 0x677: 0x3ed0, 0x678: 0x363d, 0x679: 0x4306, 0x67a: 0x366d, 0x67b: 0x4318, + 0x67c: 0x3679, 0x67d: 0x4256, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c06, 0x683: 0xa000, 0x684: 0x3c0d, 0x685: 0xa000, + 0x687: 0x3c14, 0x688: 0xa000, 0x689: 0x3c1b, + 0x68d: 0xa000, + 0x6a0: 0x2f65, 0x6a1: 0xa000, 0x6a2: 0x3c29, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c22, 0x6ae: 0x2f60, 0x6af: 0x2f6a, + 0x6b0: 0x3c30, 0x6b1: 0x3c37, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c3e, 0x6b5: 0x3c45, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4c, 0x6b9: 0x3c53, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5a, 0x6c1: 0x3c61, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c76, 0x6c5: 0x3c7d, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c84, 0x6c9: 0x3c8b, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca0, 0x6ed: 
0x3ca7, 0x6ee: 0x3cae, 0x6ef: 0x3cb5, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f08, 0x70d: 0xa000, 0x70e: 0x3f10, 0x70f: 0xa000, 0x710: 0x3f18, 0x711: 0xa000, + 0x712: 0x3f20, 0x713: 0xa000, 0x714: 0x3f28, 0x715: 0xa000, 0x716: 0x3f30, 0x717: 0xa000, + 0x718: 0x3f38, 0x719: 0xa000, 0x71a: 0x3f40, 0x71b: 0xa000, 0x71c: 0x3f48, 0x71d: 0xa000, + 0x71e: 0x3f50, 0x71f: 0xa000, 0x720: 0x3f58, 0x721: 0xa000, 0x722: 0x3f60, + 0x724: 0xa000, 0x725: 0x3f68, 0x726: 0xa000, 0x727: 0x3f70, 0x728: 0xa000, 0x729: 0x3f78, + 0x72f: 0xa000, + 0x730: 0x3f80, 0x731: 0x3f88, 0x732: 0xa000, 0x733: 0x3f90, 0x734: 0x3f98, 0x735: 0xa000, + 0x736: 0x3fa0, 0x737: 0x3fa8, 0x738: 0xa000, 0x739: 0x3fb0, 0x73a: 0x3fb8, 0x73b: 0xa000, + 0x73c: 0x3fc0, 0x73d: 0x3fc8, + // Block 0x1d, offset 0x740 + 0x754: 0x3f00, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd0, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe0, 0x76d: 0xa000, 0x76e: 0x3fe8, 0x76f: 0xa000, + 0x770: 0x3ff0, 0x771: 0xa000, 0x772: 0x3ff8, 0x773: 0xa000, 0x774: 0x4000, 0x775: 0xa000, + 0x776: 0x4008, 0x777: 0xa000, 0x778: 0x4010, 0x779: 0xa000, 0x77a: 0x4018, 0x77b: 0xa000, + 0x77c: 0x4020, 0x77d: 0xa000, 0x77e: 0x4028, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4030, 0x781: 0xa000, 0x782: 0x4038, 0x784: 0xa000, 0x785: 0x4040, + 0x786: 0xa000, 0x787: 0x4048, 0x788: 0xa000, 0x789: 0x4050, + 0x78f: 0xa000, 0x790: 0x4058, 0x791: 0x4060, + 0x792: 0xa000, 0x793: 0x4068, 0x794: 0x4070, 0x795: 0xa000, 0x796: 0x4078, 0x797: 0x4080, + 0x798: 0xa000, 0x799: 0x4088, 0x79a: 0x4090, 0x79b: 0xa000, 0x79c: 0x4098, 0x79d: 0x40a0, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fd8, + 0x7b7: 0x40a8, 0x7b8: 0x40b0, 0x7b9: 0x40b8, 0x7ba: 0x40c0, + 0x7bd: 0xa000, 0x7be: 0x40c8, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 
0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 
0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 
0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 
0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 
0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 
0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, + 0x3a8: 0xb3, 0x3a9: 0xb4, 0x3aa: 0xb5, + 0x3b0: 0x73, 0x3b5: 0xb6, 0x3b6: 0xb7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb8, 0x3ec: 0xb9, + // Block 0x10, offset 0x400 + 0x432: 0xba, + // Block 0x11, offset 0x440 + 0x445: 0xbb, 0x446: 0xbc, 0x447: 0xbd, + 0x449: 0xbe, + // Block 0x12, offset 0x480 + 0x480: 0xbf, + 0x4a3: 0xc0, 0x4a5: 0xc1, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc2, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 149 entries, 298 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xcf, 0xd1, 0xd6, 0xe7, 0xf3, 0xf5, 0xfb, 0xfd, 0xff, 0x101, 0x103, 0x105, 0x107, 0x10a, 0x10d, 0x10f, 0x112, 0x115, 0x119, 0x11e, 0x127, 0x129, 0x12c, 0x12e, 0x139, 0x13d, 0x14b, 0x14e, 0x154, 0x15a, 0x165, 0x169, 0x16b, 0x16d, 0x16f, 0x171, 0x173, 0x179, 0x17d, 0x17f, 0x181, 0x189, 0x18d, 0x190, 0x192, 0x194, 0x196, 0x199, 0x19b, 0x19d, 0x19f, 0x1a1, 0x1a7, 0x1aa, 0x1ac, 0x1b3, 0x1b9, 0x1bf, 0x1c7, 0x1cd, 0x1d3, 0x1d9, 0x1dd, 0x1eb, 0x1f4, 0x1f7, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x205, 0x20a, 0x20c, 0x20e, 0x213, 0x219, 0x21b, 0x21d, 0x21f, 0x225, 0x228, 0x22a, 0x230, 0x233, 0x23b, 0x242, 0x245, 0x248, 0x24a, 0x24d, 0x255, 0x259, 0x260, 0x263, 0x269, 0x26b, 0x26e, 0x270, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x284, 0x291, 0x29b, 0x29d, 0x29f, 0x2a5, 0x2a7, 0x2aa} + +// nfcSparseValues: 684 entries, 2736 bytes +var nfcSparseValues = [684]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 
0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 
0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, 
hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd1 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe7 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf3 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf5 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfb + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfd + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 
0x80, hi: 0x92}, + // Block 0x2b, offset 0xff + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x101 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x103 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x107 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x10f + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x112 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x115 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x119 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x129 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12e + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x139 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13d + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14b + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14e + {value: 0x0007, lo: 0x05}, + {value: 0xa000, 
lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x154 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15a + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x165 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x169 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16b + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x173 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x179 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x17f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x181 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x190 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x192 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x196 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x199 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 
0x55, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x56, offset 0x1a1 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ac + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1b9 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1bf + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1d9 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1dd + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1eb + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 
0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f4 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f7 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x205 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20e + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x21f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x225 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x230 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x233 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x242 + {value: 0x0000, lo: 0x02}, + 
{value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 0x245 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x255 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x259 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x263 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x269 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x270 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x86, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x87, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x88, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8a, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8b, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8c, offset 0x282 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8d, offset 0x284 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x291 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 
0x82},
+	{value: 0x8132, lo: 0x85, hi: 0x89},
+	{value: 0x812d, lo: 0x8a, hi: 0x8b},
+	{value: 0x8132, lo: 0xaa, hi: 0xad},
+	{value: 0x45e0, lo: 0xbb, hi: 0xbb},
+	{value: 0x45ea, lo: 0xbc, hi: 0xbc},
+	{value: 0x4650, lo: 0xbd, hi: 0xbd},
+	{value: 0x466c, lo: 0xbe, hi: 0xbe},
+	{value: 0x465e, lo: 0xbf, hi: 0xbf},
+	// Block 0x8f, offset 0x29b
+	{value: 0x0000, lo: 0x01},
+	{value: 0x467a, lo: 0x80, hi: 0x80},
+	// Block 0x90, offset 0x29d
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8132, lo: 0x82, hi: 0x84},
+	// Block 0x91, offset 0x29f
+	{value: 0x0000, lo: 0x05},
+	{value: 0x8132, lo: 0x80, hi: 0x86},
+	{value: 0x8132, lo: 0x88, hi: 0x98},
+	{value: 0x8132, lo: 0x9b, hi: 0xa1},
+	{value: 0x8132, lo: 0xa3, hi: 0xa4},
+	{value: 0x8132, lo: 0xa6, hi: 0xaa},
+	// Block 0x92, offset 0x2a5
+	{value: 0x0000, lo: 0x01},
+	{value: 0x812d, lo: 0x90, hi: 0x96},
+	// Block 0x93, offset 0x2a7
+	{value: 0x0000, lo: 0x02},
+	{value: 0x8132, lo: 0x84, hi: 0x89},
+	{value: 0x8102, lo: 0x8a, hi: 0x8a},
+	// Block 0x94, offset 0x2aa
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8100, lo: 0x93, hi: 0x93},
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// nfkcTrie. Total size: 17248 bytes (16.84 KiB). Checksum: 4fb368372b6b1b27.
+type nfkcTrie struct{}
+
+func newNfkcTrie(i int) *nfkcTrie {
+	return &nfkcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	case n < 92:
+		return uint16(nfkcValues[n<<6+uint32(b)])
+	default:
+		n -= 92
+		return uint16(nfkcSparse.lookup(n, b))
+	}
+}
+
+// nfkcValues: 94 blocks, 6016 entries, 12032 bytes
+// The third block is the zero block.
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d26, 0x447: 0xa000, 0x448: 0x2d2e, 0x449: 0xa000, 0x44a: 0x2d36, 0x44b: 0xa000, + 0x44c: 0x2d3e, 0x44d: 0xa000, 0x44e: 0x2d46, 0x451: 0xa000, + 0x452: 0x2d4e, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d56, + 0x47c: 0xa000, 0x47d: 0x2d5e, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f97, 0x541: 0x32a3, 0x542: 0x2fa1, 0x543: 0x32ad, 0x544: 0x2fa6, 0x545: 0x32b2, + 0x546: 0x2fab, 0x547: 0x32b7, 0x548: 0x38cc, 0x549: 0x3a5b, 0x54a: 0x2fc4, 0x54b: 0x32d0, + 0x54c: 0x2fce, 0x54d: 0x32da, 0x54e: 0x2fdd, 0x54f: 0x32e9, 0x550: 0x2fd3, 0x551: 0x32df, + 0x552: 0x2fd8, 0x553: 0x32e4, 0x554: 0x38ef, 0x555: 0x3a7e, 0x556: 0x38f6, 0x557: 0x3a85, + 0x558: 0x3019, 0x559: 0x3325, 0x55a: 0x301e, 0x55b: 0x332a, 0x55c: 0x3904, 0x55d: 0x3a93, + 0x55e: 0x3023, 0x55f: 0x332f, 0x560: 0x3032, 0x561: 0x333e, 0x562: 0x3050, 0x563: 0x335c, + 0x564: 0x305f, 0x565: 0x336b, 0x566: 0x3055, 0x567: 0x3361, 0x568: 0x3064, 0x569: 0x3370, + 0x56a: 0x3069, 0x56b: 0x3375, 0x56c: 0x30af, 0x56d: 
0x33bb, 0x56e: 0x390b, 0x56f: 0x3a9a, + 0x570: 0x30b9, 0x571: 0x33ca, 0x572: 0x30c3, 0x573: 0x33d4, 0x574: 0x30cd, 0x575: 0x33de, + 0x576: 0x46c4, 0x577: 0x4755, 0x578: 0x3912, 0x579: 0x3aa1, 0x57a: 0x30e6, 0x57b: 0x33f7, + 0x57c: 0x30e1, 0x57d: 0x33f2, 0x57e: 0x30eb, 0x57f: 0x33fc, + // Block 0x16, offset 0x580 + 0x580: 0x30f0, 0x581: 0x3401, 0x582: 0x30f5, 0x583: 0x3406, 0x584: 0x3109, 0x585: 0x341a, + 0x586: 0x3113, 0x587: 0x3424, 0x588: 0x3122, 0x589: 0x3433, 0x58a: 0x311d, 0x58b: 0x342e, + 0x58c: 0x3935, 0x58d: 0x3ac4, 0x58e: 0x3943, 0x58f: 0x3ad2, 0x590: 0x394a, 0x591: 0x3ad9, + 0x592: 0x3951, 0x593: 0x3ae0, 0x594: 0x314f, 0x595: 0x3460, 0x596: 0x3154, 0x597: 0x3465, + 0x598: 0x315e, 0x599: 0x346f, 0x59a: 0x46f1, 0x59b: 0x4782, 0x59c: 0x3997, 0x59d: 0x3b26, + 0x59e: 0x3177, 0x59f: 0x3488, 0x5a0: 0x3181, 0x5a1: 0x3492, 0x5a2: 0x4700, 0x5a3: 0x4791, + 0x5a4: 0x399e, 0x5a5: 0x3b2d, 0x5a6: 0x39a5, 0x5a7: 0x3b34, 0x5a8: 0x39ac, 0x5a9: 0x3b3b, + 0x5aa: 0x3190, 0x5ab: 0x34a1, 0x5ac: 0x319a, 0x5ad: 0x34b0, 0x5ae: 0x31ae, 0x5af: 0x34c4, + 0x5b0: 0x31a9, 0x5b1: 0x34bf, 0x5b2: 0x31ea, 0x5b3: 0x3500, 0x5b4: 0x31f9, 0x5b5: 0x350f, + 0x5b6: 0x31f4, 0x5b7: 0x350a, 0x5b8: 0x39b3, 0x5b9: 0x3b42, 0x5ba: 0x39ba, 0x5bb: 0x3b49, + 0x5bc: 0x31fe, 0x5bd: 0x3514, 0x5be: 0x3203, 0x5bf: 0x3519, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3208, 0x5c1: 0x351e, 0x5c2: 0x320d, 0x5c3: 0x3523, 0x5c4: 0x321c, 0x5c5: 0x3532, + 0x5c6: 0x3217, 0x5c7: 0x352d, 0x5c8: 0x3221, 0x5c9: 0x353c, 0x5ca: 0x3226, 0x5cb: 0x3541, + 0x5cc: 0x322b, 0x5cd: 0x3546, 0x5ce: 0x3249, 0x5cf: 0x3564, 0x5d0: 0x3262, 0x5d1: 0x3582, + 0x5d2: 0x3271, 0x5d3: 0x3591, 0x5d4: 0x3276, 0x5d5: 0x3596, 0x5d6: 0x337a, 0x5d7: 0x34a6, + 0x5d8: 0x3537, 0x5d9: 0x3573, 0x5da: 0x1be0, 0x5db: 0x42d7, + 0x5e0: 0x46a1, 0x5e1: 0x4732, 0x5e2: 0x2f83, 0x5e3: 0x328f, + 0x5e4: 0x3878, 0x5e5: 0x3a07, 0x5e6: 0x3871, 0x5e7: 0x3a00, 0x5e8: 0x3886, 0x5e9: 0x3a15, + 0x5ea: 0x387f, 0x5eb: 0x3a0e, 0x5ec: 0x38be, 0x5ed: 0x3a4d, 0x5ee: 0x3894, 0x5ef: 0x3a23, + 0x5f0: 0x388d, 0x5f1: 0x3a1c, 0x5f2: 0x38a2, 0x5f3: 0x3a31, 0x5f4: 0x389b, 0x5f5: 0x3a2a, + 0x5f6: 0x38c5, 0x5f7: 0x3a54, 0x5f8: 0x46b5, 0x5f9: 0x4746, 0x5fa: 0x3000, 0x5fb: 0x330c, + 0x5fc: 0x2fec, 0x5fd: 0x32f8, 0x5fe: 0x38da, 0x5ff: 0x3a69, + // Block 0x18, offset 0x600 + 0x600: 0x38d3, 0x601: 0x3a62, 0x602: 0x38e8, 0x603: 0x3a77, 0x604: 0x38e1, 0x605: 0x3a70, + 0x606: 0x38fd, 0x607: 0x3a8c, 0x608: 0x3091, 0x609: 0x339d, 0x60a: 0x30a5, 0x60b: 0x33b1, + 0x60c: 0x46e7, 0x60d: 0x4778, 0x60e: 0x3136, 0x60f: 0x3447, 0x610: 0x3920, 0x611: 0x3aaf, + 0x612: 0x3919, 0x613: 0x3aa8, 0x614: 0x392e, 0x615: 0x3abd, 0x616: 0x3927, 0x617: 0x3ab6, + 0x618: 0x3989, 0x619: 0x3b18, 0x61a: 0x396d, 0x61b: 0x3afc, 0x61c: 0x3966, 0x61d: 0x3af5, + 0x61e: 0x397b, 0x61f: 0x3b0a, 0x620: 0x3974, 0x621: 0x3b03, 0x622: 0x3982, 0x623: 0x3b11, + 0x624: 0x31e5, 0x625: 0x34fb, 0x626: 0x31c7, 0x627: 0x34dd, 0x628: 0x39e4, 0x629: 0x3b73, + 0x62a: 0x39dd, 0x62b: 0x3b6c, 0x62c: 0x39f2, 0x62d: 0x3b81, 0x62e: 0x39eb, 0x62f: 0x3b7a, + 0x630: 0x39f9, 0x631: 0x3b88, 0x632: 0x3230, 0x633: 0x354b, 0x634: 0x3258, 0x635: 0x3578, + 0x636: 0x3253, 0x637: 0x356e, 0x638: 0x323f, 0x639: 0x355a, + // Block 0x19, offset 0x640 + 0x640: 0x4804, 0x641: 0x480a, 0x642: 0x491e, 0x643: 0x4936, 0x644: 0x4926, 0x645: 0x493e, + 0x646: 0x492e, 0x647: 0x4946, 0x648: 0x47aa, 0x649: 0x47b0, 0x64a: 0x488e, 0x64b: 0x48a6, + 0x64c: 0x4896, 0x64d: 0x48ae, 0x64e: 0x489e, 0x64f: 0x48b6, 0x650: 0x4816, 0x651: 0x481c, + 0x652: 0x3db8, 0x653: 0x3dc8, 0x654: 0x3dc0, 0x655: 0x3dd0, + 0x658: 0x47b6, 
0x659: 0x47bc, 0x65a: 0x3ce8, 0x65b: 0x3cf8, 0x65c: 0x3cf0, 0x65d: 0x3d00, + 0x660: 0x482e, 0x661: 0x4834, 0x662: 0x494e, 0x663: 0x4966, + 0x664: 0x4956, 0x665: 0x496e, 0x666: 0x495e, 0x667: 0x4976, 0x668: 0x47c2, 0x669: 0x47c8, + 0x66a: 0x48be, 0x66b: 0x48d6, 0x66c: 0x48c6, 0x66d: 0x48de, 0x66e: 0x48ce, 0x66f: 0x48e6, + 0x670: 0x4846, 0x671: 0x484c, 0x672: 0x3e18, 0x673: 0x3e30, 0x674: 0x3e20, 0x675: 0x3e38, + 0x676: 0x3e28, 0x677: 0x3e40, 0x678: 0x47ce, 0x679: 0x47d4, 0x67a: 0x3d18, 0x67b: 0x3d30, + 0x67c: 0x3d20, 0x67d: 0x3d38, 0x67e: 0x3d28, 0x67f: 0x3d40, + // Block 0x1a, offset 0x680 + 0x680: 0x4852, 0x681: 0x4858, 0x682: 0x3e48, 0x683: 0x3e58, 0x684: 0x3e50, 0x685: 0x3e60, + 0x688: 0x47da, 0x689: 0x47e0, 0x68a: 0x3d48, 0x68b: 0x3d58, + 0x68c: 0x3d50, 0x68d: 0x3d60, 0x690: 0x4864, 0x691: 0x486a, + 0x692: 0x3e80, 0x693: 0x3e98, 0x694: 0x3e88, 0x695: 0x3ea0, 0x696: 0x3e90, 0x697: 0x3ea8, + 0x699: 0x47e6, 0x69b: 0x3d68, 0x69d: 0x3d70, + 0x69f: 0x3d78, 0x6a0: 0x487c, 0x6a1: 0x4882, 0x6a2: 0x497e, 0x6a3: 0x4996, + 0x6a4: 0x4986, 0x6a5: 0x499e, 0x6a6: 0x498e, 0x6a7: 0x49a6, 0x6a8: 0x47ec, 0x6a9: 0x47f2, + 0x6aa: 0x48ee, 0x6ab: 0x4906, 0x6ac: 0x48f6, 0x6ad: 0x490e, 0x6ae: 0x48fe, 0x6af: 0x4916, + 0x6b0: 0x47f8, 0x6b1: 0x431e, 0x6b2: 0x3691, 0x6b3: 0x4324, 0x6b4: 0x4822, 0x6b5: 0x432a, + 0x6b6: 0x36a3, 0x6b7: 0x4330, 0x6b8: 0x36c1, 0x6b9: 0x4336, 0x6ba: 0x36d9, 0x6bb: 0x433c, + 0x6bc: 0x4870, 0x6bd: 0x4342, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da0, 0x6c1: 0x3da8, 0x6c2: 0x4184, 0x6c3: 0x41a2, 0x6c4: 0x418e, 0x6c5: 0x41ac, + 0x6c6: 0x4198, 0x6c7: 0x41b6, 0x6c8: 0x3cd8, 0x6c9: 0x3ce0, 0x6ca: 0x40d0, 0x6cb: 0x40ee, + 0x6cc: 0x40da, 0x6cd: 0x40f8, 0x6ce: 0x40e4, 0x6cf: 0x4102, 0x6d0: 0x3de8, 0x6d1: 0x3df0, + 0x6d2: 0x41c0, 0x6d3: 0x41de, 0x6d4: 0x41ca, 0x6d5: 0x41e8, 0x6d6: 0x41d4, 0x6d7: 0x41f2, + 0x6d8: 0x3d08, 0x6d9: 0x3d10, 0x6da: 0x410c, 0x6db: 0x412a, 0x6dc: 0x4116, 0x6dd: 0x4134, + 0x6de: 0x4120, 0x6df: 0x413e, 0x6e0: 0x3ec0, 0x6e1: 0x3ec8, 0x6e2: 0x41fc, 0x6e3: 0x421a, + 0x6e4: 0x4206, 0x6e5: 0x4224, 0x6e6: 0x4210, 0x6e7: 0x422e, 0x6e8: 0x3d80, 0x6e9: 0x3d88, + 0x6ea: 0x4148, 0x6eb: 0x4166, 0x6ec: 0x4152, 0x6ed: 0x4170, 0x6ee: 0x415c, 0x6ef: 0x417a, + 0x6f0: 0x3685, 0x6f1: 0x367f, 0x6f2: 0x3d90, 0x6f3: 0x368b, 0x6f4: 0x3d98, + 0x6f6: 0x4810, 0x6f7: 0x3db0, 0x6f8: 0x35f5, 0x6f9: 0x35ef, 0x6fa: 0x35e3, 0x6fb: 0x42ee, + 0x6fc: 0x35fb, 0x6fd: 0x4287, 0x6fe: 0x01d3, 0x6ff: 0x4287, + // Block 0x1c, offset 0x700 + 0x700: 0x42a0, 0x701: 0x4482, 0x702: 0x3dd8, 0x703: 0x369d, 0x704: 0x3de0, + 0x706: 0x483a, 0x707: 0x3df8, 0x708: 0x3601, 0x709: 0x42f4, 0x70a: 0x360d, 0x70b: 0x42fa, + 0x70c: 0x3619, 0x70d: 0x4489, 0x70e: 0x4490, 0x70f: 0x4497, 0x710: 0x36b5, 0x711: 0x36af, + 0x712: 0x3e00, 0x713: 0x44e4, 0x716: 0x36bb, 0x717: 0x3e10, + 0x718: 0x3631, 0x719: 0x362b, 0x71a: 0x361f, 0x71b: 0x4300, 0x71d: 0x449e, + 0x71e: 0x44a5, 0x71f: 0x44ac, 0x720: 0x36eb, 0x721: 0x36e5, 0x722: 0x3e68, 0x723: 0x44ec, + 0x724: 0x36cd, 0x725: 0x36d3, 0x726: 0x36f1, 0x727: 0x3e78, 0x728: 0x3661, 0x729: 0x365b, + 0x72a: 0x364f, 0x72b: 0x430c, 0x72c: 0x3649, 0x72d: 0x4474, 0x72e: 0x447b, 0x72f: 0x0081, + 0x732: 0x3eb0, 0x733: 0x36f7, 0x734: 0x3eb8, + 0x736: 0x4888, 0x737: 0x3ed0, 0x738: 0x363d, 0x739: 0x4306, 0x73a: 0x366d, 0x73b: 0x4318, + 0x73c: 0x3679, 0x73d: 0x425a, 0x73e: 0x428c, + // Block 0x1d, offset 0x740 + 0x740: 0x1bd8, 0x741: 0x1bdc, 0x742: 0x0047, 0x743: 0x1c54, 0x745: 0x1be8, + 0x746: 0x1bec, 0x747: 0x00e9, 0x749: 0x1c58, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x198d, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x199f, 0x761: 0x1bc8, 0x762: 0x19a8, + 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42d2, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215, + 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b98, + 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0, + // Block 0x1e, offset 0x780 + 0x780: 0x0463, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x222e, 0x791: 0x223a, + 0x792: 0x22ee, 0x793: 0x2216, 0x794: 0x229a, 0x795: 0x2222, 0x796: 0x22a0, 0x797: 0x22b8, + 0x798: 0x22c4, 0x799: 0x2228, 0x79a: 0x22ca, 0x79b: 0x2234, 0x79c: 0x22be, 0x79d: 0x22d0, + 0x79e: 0x22d6, 0x79f: 0x1cbc, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba4, 0x7a3: 0x1963, + 0x7a4: 0x006d, 0x7a5: 0x19ab, 0x7a6: 0x1bd0, 0x7a7: 0x1d48, 0x7a8: 0x1966, 0x7a9: 0x0071, + 0x7aa: 0x19b7, 0x7ab: 0x1bd4, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19e4, 0x7b2: 0x1c18, 0x7b3: 0x19ed, 0x7b4: 0x00ad, 0x7b5: 0x1a62, + 0x7b6: 0x1c4c, 0x7b7: 0x1d5c, 0x7b8: 0x19f0, 0x7b9: 0x00b1, 0x7ba: 0x1a65, 0x7bb: 0x1c50, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c06, 0x7c3: 0xa000, 0x7c4: 0x3c0d, 0x7c5: 0xa000, + 0x7c7: 0x3c14, 0x7c8: 0xa000, 0x7c9: 0x3c1b, + 0x7cd: 0xa000, + 0x7e0: 0x2f65, 0x7e1: 0xa000, 0x7e2: 0x3c29, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c22, 0x7ee: 0x2f60, 0x7ef: 0x2f6a, + 0x7f0: 0x3c30, 0x7f1: 0x3c37, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c3e, 0x7f5: 0x3c45, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4c, 0x7f9: 0x3c53, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c5a, 0x801: 0x3c61, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c76, 0x805: 0x3c7d, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c84, 0x809: 0x3c8b, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3ca0, 0x82d: 0x3ca7, 0x82e: 0x3cae, 0x82f: 0x3cb5, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882, + 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894, + 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a68, 0x875: 0x1a6c, + 0x876: 0x1a70, 0x877: 0x1a74, 0x878: 0x1a78, 0x879: 0x1a7c, 0x87a: 0x1a80, 0x87b: 0x1a84, + 0x87c: 0x1a88, 0x87d: 0x1c80, 0x87e: 0x1c85, 0x87f: 0x1c8a, + // Block 0x22, offset 0x880 + 0x880: 0x1c8f, 0x881: 0x1c94, 0x882: 0x1c99, 0x883: 0x1c9e, 0x884: 0x1ca3, 0x885: 0x1ca8, + 0x886: 0x1cad, 0x887: 0x1cb2, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb, + 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b60, + 0x892: 0x1b64, 0x893: 0x1b68, 0x894: 0x1b6c, 0x895: 0x1b70, 0x896: 0x1b74, 0x897: 0x1b78, + 0x898: 0x1b7c, 0x899: 0x1b80, 0x89a: 0x1b84, 0x89b: 0x1b88, 0x89c: 0x1af4, 0x89d: 0x1af8, + 0x89e: 0x1afc, 0x89f: 0x1b00, 0x8a0: 0x1b04, 0x8a1: 0x1b08, 0x8a2: 0x1b0c, 0x8a3: 0x1b10, + 0x8a4: 0x1b14, 0x8a5: 0x1b18, 0x8a6: 0x1b1c, 0x8a7: 0x1b20, 0x8a8: 0x1b24, 0x8a9: 0x1b28, + 0x8aa: 0x1b2c, 
0x8ab: 0x1b30, 0x8ac: 0x1b34, 0x8ad: 0x1b38, 0x8ae: 0x1b3c, 0x8af: 0x1b40, + 0x8b0: 0x1b44, 0x8b1: 0x1b48, 0x8b2: 0x1b4c, 0x8b3: 0x1b50, 0x8b4: 0x1b54, 0x8b5: 0x1b58, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713, + 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab, + 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803, + 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887, + 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db, + 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb, + 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b, + 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7, + 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33, + 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63, + 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f, + // Block 0x24, offset 0x900 + 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb, + 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b, + 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb, + 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3, + 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f, + 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83, + 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7, + 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f, + 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf, + 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f, + 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187, + // Block 0x25, offset 0x940 + 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3, + 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb, + 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b, + 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b, + 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf, + 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f, + 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f, + 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503, + 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f, + 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f, + 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f08, 0x98d: 0xa000, 0x98e: 0x3f10, 0x98f: 0xa000, 0x990: 0x3f18, 0x991: 0xa000, + 0x992: 0x3f20, 0x993: 0xa000, 0x994: 
0x3f28, 0x995: 0xa000, 0x996: 0x3f30, 0x997: 0xa000, + 0x998: 0x3f38, 0x999: 0xa000, 0x99a: 0x3f40, 0x99b: 0xa000, 0x99c: 0x3f48, 0x99d: 0xa000, + 0x99e: 0x3f50, 0x99f: 0xa000, 0x9a0: 0x3f58, 0x9a1: 0xa000, 0x9a2: 0x3f60, + 0x9a4: 0xa000, 0x9a5: 0x3f68, 0x9a6: 0xa000, 0x9a7: 0x3f70, 0x9a8: 0xa000, 0x9a9: 0x3f78, + 0x9af: 0xa000, + 0x9b0: 0x3f80, 0x9b1: 0x3f88, 0x9b2: 0xa000, 0x9b3: 0x3f90, 0x9b4: 0x3f98, 0x9b5: 0xa000, + 0x9b6: 0x3fa0, 0x9b7: 0x3fa8, 0x9b8: 0xa000, 0x9b9: 0x3fb0, 0x9ba: 0x3fb8, 0x9bb: 0xa000, + 0x9bc: 0x3fc0, 0x9bd: 0x3fc8, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f00, + 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42dc, 0x9dc: 0x42e2, 0x9dd: 0xa000, + 0x9de: 0x3fd0, 0x9df: 0x26b4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3fe0, 0x9ed: 0xa000, 0x9ee: 0x3fe8, 0x9ef: 0xa000, + 0x9f0: 0x3ff0, 0x9f1: 0xa000, 0x9f2: 0x3ff8, 0x9f3: 0xa000, 0x9f4: 0x4000, 0x9f5: 0xa000, + 0x9f6: 0x4008, 0x9f7: 0xa000, 0x9f8: 0x4010, 0x9f9: 0xa000, 0x9fa: 0x4018, 0x9fb: 0xa000, + 0x9fc: 0x4020, 0x9fd: 0xa000, 0x9fe: 0x4028, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4030, 0xa01: 0xa000, 0xa02: 0x4038, 0xa04: 0xa000, 0xa05: 0x4040, + 0xa06: 0xa000, 0xa07: 0x4048, 0xa08: 0xa000, 0xa09: 0x4050, + 0xa0f: 0xa000, 0xa10: 0x4058, 0xa11: 0x4060, + 0xa12: 0xa000, 0xa13: 0x4068, 0xa14: 0x4070, 0xa15: 0xa000, 0xa16: 0x4078, 0xa17: 0x4080, + 0xa18: 0xa000, 0xa19: 0x4088, 0xa1a: 0x4090, 0xa1b: 0xa000, 0xa1c: 0x4098, 0xa1d: 0x40a0, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fd8, + 0xa37: 0x40a8, 0xa38: 0x40b0, 0xa39: 0x40b8, 0xa3a: 0x40c0, + 0xa3d: 0xa000, 0xa3e: 0x40c8, 0xa3f: 0x26c9, + // Block 0x29, offset 0xa40 + 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337, + 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f, + 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49bd, 0xa50: 0x49c3, 0xa51: 0x49c9, + 0xa52: 0x49cf, 0xa53: 0x49d5, 0xa54: 0x49db, 0xa55: 0x49e1, 0xa56: 0x49e7, 0xa57: 0x49ed, + 0xa58: 0x49f3, 0xa59: 0x49f9, 0xa5a: 0x49ff, 0xa5b: 0x4a05, 0xa5c: 0x4a0b, 0xa5d: 0x4a11, + 0xa5e: 0x4a17, 0xa5f: 0x4a1d, 0xa60: 0x4a23, 0xa61: 0x4a29, 0xa62: 0x4a2f, 0xa63: 0x4a35, + 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef, + 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403, + 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383, + 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b, + 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb, + 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7, + 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3, + 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7, + 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff, + 0xa9e: 0x098f, 0xa9f: 0x072f, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2054, 0xac1: 0x205a, 0xac2: 0x2060, 0xac3: 0x2066, 0xac4: 0x206c, 0xac5: 0x2072, + 0xac6: 0x2078, 0xac7: 0x207e, 0xac8: 0x2084, 0xac9: 0x208a, 0xaca: 0x2090, 0xacb: 0x2096, + 0xacc: 0x209c, 0xacd: 0x20a2, 0xace: 0x2726, 0xacf: 0x272f, 0xad0: 0x2738, 0xad1: 0x2741, + 0xad2: 0x274a, 0xad3: 0x2753, 0xad4: 0x275c, 0xad5: 0x2765, 0xad6: 0x276e, 0xad7: 0x2780, + 0xad8: 0x2789, 0xad9: 0x2792, 
0xada: 0x279b, 0xadb: 0x27a4, 0xadc: 0x2777, 0xadd: 0x2bac, + 0xade: 0x2aed, 0xae0: 0x20a8, 0xae1: 0x20c0, 0xae2: 0x20b4, 0xae3: 0x2108, + 0xae4: 0x20c6, 0xae5: 0x20e4, 0xae6: 0x20ae, 0xae7: 0x20de, 0xae8: 0x20ba, 0xae9: 0x20f0, + 0xaea: 0x2120, 0xaeb: 0x213e, 0xaec: 0x2138, 0xaed: 0x212c, 0xaee: 0x217a, 0xaef: 0x210e, + 0xaf0: 0x211a, 0xaf1: 0x2132, 0xaf2: 0x2126, 0xaf3: 0x2150, 0xaf4: 0x20fc, 0xaf5: 0x2144, + 0xaf6: 0x216e, 0xaf7: 0x2156, 0xaf8: 0x20ea, 0xaf9: 0x20cc, 0xafa: 0x2102, 0xafb: 0x2114, + 0xafc: 0x214a, 0xafd: 0x20d2, 0xafe: 0x2174, 0xaff: 0x20f6, + // Block 0x2c, offset 0xb00 + 0xb00: 0x215c, 0xb01: 0x20d8, 0xb02: 0x2162, 0xb03: 0x2168, 0xb04: 0x092f, 0xb05: 0x0b03, + 0xb06: 0x0ca7, 0xb07: 0x10c7, + 0xb10: 0x1bc4, 0xb11: 0x18a9, + 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb, + 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3, + 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327, + 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b, + 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e18, 0xb2f: 0x2e20, + 0xb30: 0x2e28, 0xb31: 0x2e30, 0xb32: 0x2e38, 0xb33: 0x2e40, 0xb34: 0x2e48, 0xb35: 0x2e50, + 0xb36: 0x2e60, 0xb37: 0x2e68, 0xb38: 0x2e70, 0xb39: 0x2e78, 0xb3a: 0x2e80, 0xb3b: 0x2e88, + 0xb3c: 0x2ed3, 0xb3d: 0x2e9b, 0xb3e: 0x2e58, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af, + 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f, + 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b, + 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f, + 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f, + 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b, + 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f, + 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b, + 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee, + 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900, + 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1cc6, 0xb81: 0x1cd5, 0xb82: 0x1ce4, 0xb83: 0x1cf3, 0xb84: 0x1d02, 0xb85: 0x1d11, + 0xb86: 0x1d20, 0xb87: 0x1d2f, 0xb88: 0x1d3e, 0xb89: 0x218c, 0xb8a: 0x219e, 0xb8b: 0x21b0, + 0xb8c: 0x1954, 0xb8d: 0x1c04, 0xb8e: 0x19d2, 0xb8f: 0x1ba8, 0xb90: 0x04cb, 0xb91: 0x04d3, + 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7, + 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f, + 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b, + 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543, + 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b, + 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f, + 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597, + 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b0c, 0xbc1: 0x29a8, 0xbc2: 0x2b1c, 0xbc3: 0x2880, 0xbc4: 
0x2ee4, 0xbc5: 0x288a, + 0xbc6: 0x2894, 0xbc7: 0x2f28, 0xbc8: 0x29b5, 0xbc9: 0x289e, 0xbca: 0x28a8, 0xbcb: 0x28b2, + 0xbcc: 0x29dc, 0xbcd: 0x29e9, 0xbce: 0x29c2, 0xbcf: 0x29cf, 0xbd0: 0x2ea9, 0xbd1: 0x29f6, + 0xbd2: 0x2a03, 0xbd3: 0x2bbe, 0xbd4: 0x26bb, 0xbd5: 0x2bd1, 0xbd6: 0x2be4, 0xbd7: 0x2b2c, + 0xbd8: 0x2a10, 0xbd9: 0x2bf7, 0xbda: 0x2c0a, 0xbdb: 0x2a1d, 0xbdc: 0x28bc, 0xbdd: 0x28c6, + 0xbde: 0x2eb7, 0xbdf: 0x2a2a, 0xbe0: 0x2b3c, 0xbe1: 0x2ef5, 0xbe2: 0x28d0, 0xbe3: 0x28da, + 0xbe4: 0x2a37, 0xbe5: 0x28e4, 0xbe6: 0x28ee, 0xbe7: 0x26d0, 0xbe8: 0x26d7, 0xbe9: 0x28f8, + 0xbea: 0x2902, 0xbeb: 0x2c1d, 0xbec: 0x2a44, 0xbed: 0x2b4c, 0xbee: 0x2c30, 0xbef: 0x2a51, + 0xbf0: 0x2916, 0xbf1: 0x290c, 0xbf2: 0x2f3c, 0xbf3: 0x2a5e, 0xbf4: 0x2c43, 0xbf5: 0x2920, + 0xbf6: 0x2b5c, 0xbf7: 0x292a, 0xbf8: 0x2a78, 0xbf9: 0x2934, 0xbfa: 0x2a85, 0xbfb: 0x2f06, + 0xbfc: 0x2a6b, 0xbfd: 0x2b6c, 0xbfe: 0x2a92, 0xbff: 0x26de, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f17, 0xc01: 0x293e, 0xc02: 0x2948, 0xc03: 0x2a9f, 0xc04: 0x2952, 0xc05: 0x295c, + 0xc06: 0x2966, 0xc07: 0x2b7c, 0xc08: 0x2aac, 0xc09: 0x26e5, 0xc0a: 0x2c56, 0xc0b: 0x2e90, + 0xc0c: 0x2b8c, 0xc0d: 0x2ab9, 0xc0e: 0x2ec5, 0xc0f: 0x2970, 0xc10: 0x297a, 0xc11: 0x2ac6, + 0xc12: 0x26ec, 0xc13: 0x2ad3, 0xc14: 0x2b9c, 0xc15: 0x26f3, 0xc16: 0x2c69, 0xc17: 0x2984, + 0xc18: 0x1cb7, 0xc19: 0x1ccb, 0xc1a: 0x1cda, 0xc1b: 0x1ce9, 0xc1c: 0x1cf8, 0xc1d: 0x1d07, + 0xc1e: 0x1d16, 0xc1f: 0x1d25, 0xc20: 0x1d34, 0xc21: 0x1d43, 0xc22: 0x2192, 0xc23: 0x21a4, + 0xc24: 0x21b6, 0xc25: 0x21c2, 0xc26: 0x21ce, 0xc27: 0x21da, 0xc28: 0x21e6, 0xc29: 0x21f2, + 0xc2a: 0x21fe, 0xc2b: 0x220a, 0xc2c: 0x2246, 0xc2d: 0x2252, 0xc2e: 0x225e, 0xc2f: 0x226a, + 0xc30: 0x2276, 0xc31: 0x1c14, 0xc32: 0x19c6, 0xc33: 0x1936, 0xc34: 0x1be4, 0xc35: 0x1a47, + 0xc36: 0x1a56, 0xc37: 0x19cc, 0xc38: 0x1bfc, 0xc39: 0x1c00, 0xc3a: 0x1960, 0xc3b: 0x2701, + 0xc3c: 0x270f, 0xc3d: 0x26fa, 0xc3e: 0x2708, 0xc3f: 0x2ae0, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a4a, 0xc41: 0x1a32, 0xc42: 0x1c60, 0xc43: 0x1a1a, 0xc44: 0x19f3, 0xc45: 0x1969, + 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf0, 0xc49: 0x1d52, 0xc4a: 0x1a4d, 0xc4b: 0x1a35, + 0xc4c: 0x1c64, 0xc4d: 0x1c70, 0xc4e: 0x1a26, 0xc4f: 0x19fc, 0xc50: 0x1957, 0xc51: 0x1c1c, + 0xc52: 0x1bb0, 0xc53: 0x1b9c, 0xc54: 0x1bcc, 0xc55: 0x1c74, 0xc56: 0x1a29, 0xc57: 0x19c9, + 0xc58: 0x19ff, 0xc59: 0x19de, 0xc5a: 0x1a41, 0xc5b: 0x1c78, 0xc5c: 0x1a2c, 0xc5d: 0x19c0, + 0xc5e: 0x1a02, 0xc5f: 0x1c3c, 0xc60: 0x1bf4, 0xc61: 0x1a14, 0xc62: 0x1c24, 0xc63: 0x1c40, + 0xc64: 0x1bf8, 0xc65: 0x1a17, 0xc66: 0x1c28, 0xc67: 0x22e8, 0xc68: 0x22fc, 0xc69: 0x1996, + 0xc6a: 0x1c20, 0xc6b: 0x1bb4, 0xc6c: 0x1ba0, 0xc6d: 0x1c48, 0xc6e: 0x2716, 0xc6f: 0x27ad, + 0xc70: 0x1a59, 0xc71: 0x1a44, 0xc72: 0x1c7c, 0xc73: 0x1a2f, 0xc74: 0x1a50, 0xc75: 0x1a38, + 0xc76: 0x1c68, 0xc77: 0x1a1d, 0xc78: 0x19f6, 0xc79: 0x1981, 0xc7a: 0x1a53, 0xc7b: 0x1a3b, + 0xc7c: 0x1c6c, 0xc7d: 0x1a20, 0xc7e: 0x19f9, 0xc7f: 0x1984, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c2c, 0xc81: 0x1bb8, 0xc82: 0x1d4d, 0xc83: 0x1939, 0xc84: 0x19ba, 0xc85: 0x19bd, + 0xc86: 0x22f5, 0xc87: 0x1b94, 0xc88: 0x19c3, 0xc89: 0x194b, 0xc8a: 0x19e1, 0xc8b: 0x194e, + 0xc8c: 0x19ea, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a05, 0xc90: 0x1a0b, 0xc91: 0x1a0e, + 0xc92: 0x1c30, 0xc93: 0x1a11, 0xc94: 0x1a23, 0xc95: 0x1c38, 0xc96: 0x1c44, 0xc97: 0x1990, + 0xc98: 0x1d57, 0xc99: 0x1bbc, 0xc9a: 0x1993, 0xc9b: 0x1a5c, 0xc9c: 0x19a5, 0xc9d: 0x19b4, + 0xc9e: 0x22e2, 0xc9f: 0x22dc, 0xca0: 0x1cc1, 0xca1: 0x1cd0, 0xca2: 0x1cdf, 0xca3: 0x1cee, + 0xca4: 0x1cfd, 0xca5: 0x1d0c, 
0xca6: 0x1d1b, 0xca7: 0x1d2a, 0xca8: 0x1d39, 0xca9: 0x2186, + 0xcaa: 0x2198, 0xcab: 0x21aa, 0xcac: 0x21bc, 0xcad: 0x21c8, 0xcae: 0x21d4, 0xcaf: 0x21e0, + 0xcb0: 0x21ec, 0xcb1: 0x21f8, 0xcb2: 0x2204, 0xcb3: 0x2240, 0xcb4: 0x224c, 0xcb5: 0x2258, + 0xcb6: 0x2264, 0xcb7: 0x2270, 0xcb8: 0x227c, 0xcb9: 0x2282, 0xcba: 0x2288, 0xcbb: 0x228e, + 0xcbc: 0x2294, 0xcbd: 0x22a6, 0xcbe: 0x22ac, 0xcbf: 0x1c10, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb, + 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943, + 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3, + 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43, + 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87, + 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283, + 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f, + 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853, + 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b, + 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b, + 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b, + // Block 0x34, offset 0xd00 + 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b, + 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f, + 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7, + 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127, + 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357, + 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873, + 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3, + 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b, + 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57, + 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb, + 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b, + // Block 0x35, offset 0xd40 + 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f, + 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3, + 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83, + 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193, + 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b, + 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b, + 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f, + 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b, + 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753, + 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777, + 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 
0x0ce3, + 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47, + 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af, + 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df, + 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817, + 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3, + 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457, + 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b, + 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27, + 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f, + 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03, + 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27, + 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af, + 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3, + 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb, + 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353, + 0xde5: 0x1407, 0xde6: 0x1433, + 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7, + 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897, + 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93, + 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3, + // Block 0x38, offset 0xe00 + 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b, + 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f, + 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f, + 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f, + 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff, + 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f, + 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f, + 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3, + 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7, + 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963, + 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b, + 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb, + 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf, + 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f, + 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013, + 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f, + 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b, + 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b, + 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 
0xe75: 0x12cb, + 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343, + 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b, + 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b, + 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2, + 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809, + 0xe98: 0x1617, 0xe99: 0x1627, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19d5, 0xec1: 0x19d8, 0xec2: 0x19db, 0xec3: 0x1c08, 0xec4: 0x1c0c, 0xec5: 0x1a5f, + 0xec6: 0x1a5f, + 0xed3: 0x1d75, 0xed4: 0x1d66, 0xed5: 0x1d6b, 0xed6: 0x1d7a, 0xed7: 0x1d70, + 0xedd: 0x4390, + 0xede: 0x8115, 0xedf: 0x4402, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221, + 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017, + 0xeea: 0x43f0, 0xeeb: 0x43f6, 0xeec: 0x44f4, 0xeed: 0x44fc, 0xeee: 0x4348, 0xeef: 0x434e, + 0xef0: 0x4354, 0xef1: 0x435a, 0xef2: 0x4366, 0xef3: 0x436c, 0xef4: 0x4372, 0xef5: 0x437e, + 0xef6: 0x4384, 0xef8: 0x438a, 0xef9: 0x4396, 0xefa: 0x439c, 0xefb: 0x43a2, + 0xefc: 0x43ae, 0xefe: 0x43b4, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43ba, 0xf01: 0x43c0, 0xf03: 0x43c6, 0xf04: 0x43cc, + 0xf06: 0x43d8, 0xf07: 0x43de, 0xf08: 0x43e4, 0xf09: 0x43ea, 0xf0a: 0x43fc, 0xf0b: 0x4378, + 0xf0c: 0x4360, 0xf0d: 0x43a8, 0xf0e: 0x43d2, 0xf0f: 0x1d7f, 0xf10: 0x0299, 0xf11: 0x0299, + 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5, + 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab, + 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8, + 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c, + 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2, + 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1, + 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4, + 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd, + 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9, + 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5, + 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1, + 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de, + 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7, + 0xf64: 0x446e, 0xf65: 0x446e, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed, + 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308, + 0xf70: 0x4468, 0xf71: 0x4468, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6, + 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x204f, + 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc, + 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d, + 0xfaa: 0x25aa, 0xfab: 0x25aa, 0xfac: 0x261a, 0xfad: 0x261a, 0xfae: 0x25e9, 
0xfaf: 0x25e9, + 0xfb0: 0x2605, 0xfb1: 0x2605, 0xfb2: 0x25fe, 0xfb3: 0x25fe, 0xfb4: 0x260c, 0xfb5: 0x260c, + 0xfb6: 0x2613, 0xfb7: 0x2613, 0xfb8: 0x2613, 0xfb9: 0x25f0, 0xfba: 0x25f0, 0xfbb: 0x25f0, + 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b1, 0xfc1: 0x25b8, 0xfc2: 0x25d4, 0xfc3: 0x25f0, 0xfc4: 0x25f7, 0xfc5: 0x1d89, + 0xfc6: 0x1d8e, 0xfc7: 0x1d93, 0xfc8: 0x1da2, 0xfc9: 0x1db1, 0xfca: 0x1db6, 0xfcb: 0x1dbb, + 0xfcc: 0x1dc0, 0xfcd: 0x1dc5, 0xfce: 0x1dd4, 0xfcf: 0x1de3, 0xfd0: 0x1de8, 0xfd1: 0x1ded, + 0xfd2: 0x1dfc, 0xfd3: 0x1e0b, 0xfd4: 0x1e10, 0xfd5: 0x1e15, 0xfd6: 0x1e1a, 0xfd7: 0x1e29, + 0xfd8: 0x1e2e, 0xfd9: 0x1e3d, 0xfda: 0x1e42, 0xfdb: 0x1e47, 0xfdc: 0x1e56, 0xfdd: 0x1e5b, + 0xfde: 0x1e60, 0xfdf: 0x1e6a, 0xfe0: 0x1ea6, 0xfe1: 0x1eb5, 0xfe2: 0x1ec4, 0xfe3: 0x1ec9, + 0xfe4: 0x1ece, 0xfe5: 0x1ed8, 0xfe6: 0x1ee7, 0xfe7: 0x1eec, 0xfe8: 0x1efb, 0xfe9: 0x1f00, + 0xfea: 0x1f05, 0xfeb: 0x1f14, 0xfec: 0x1f19, 0xfed: 0x1f28, 0xfee: 0x1f2d, 0xfef: 0x1f32, + 0xff0: 0x1f37, 0xff1: 0x1f3c, 0xff2: 0x1f41, 0xff3: 0x1f46, 0xff4: 0x1f4b, 0xff5: 0x1f50, + 0xff6: 0x1f55, 0xff7: 0x1f5a, 0xff8: 0x1f5f, 0xff9: 0x1f64, 0xffa: 0x1f69, 0xffb: 0x1f6e, + 0xffc: 0x1f73, 0xffd: 0x1f78, 0xffe: 0x1f7d, 0xfff: 0x1f87, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f8c, 0x1001: 0x1f91, 0x1002: 0x1f96, 0x1003: 0x1fa0, 0x1004: 0x1fa5, 0x1005: 0x1faf, + 0x1006: 0x1fb4, 0x1007: 0x1fb9, 0x1008: 0x1fbe, 0x1009: 0x1fc3, 0x100a: 0x1fc8, 0x100b: 0x1fcd, + 0x100c: 0x1fd2, 0x100d: 0x1fd7, 0x100e: 0x1fe6, 0x100f: 0x1ff5, 0x1010: 0x1ffa, 0x1011: 0x1fff, + 0x1012: 0x2004, 0x1013: 0x2009, 0x1014: 0x200e, 0x1015: 0x2018, 0x1016: 0x201d, 0x1017: 0x2022, + 0x1018: 0x2031, 0x1019: 0x2040, 0x101a: 0x2045, 0x101b: 0x4420, 0x101c: 0x4426, 0x101d: 0x445c, + 0x101e: 0x44b3, 0x101f: 0x44ba, 0x1020: 0x44c1, 0x1021: 0x44c8, 0x1022: 0x44cf, 0x1023: 0x44d6, + 0x1024: 0x25c6, 0x1025: 0x25cd, 0x1026: 0x25d4, 0x1027: 0x25db, 0x1028: 0x25f0, 0x1029: 0x25f7, + 0x102a: 0x1d98, 0x102b: 0x1d9d, 0x102c: 0x1da2, 0x102d: 0x1da7, 0x102e: 0x1db1, 0x102f: 0x1db6, + 0x1030: 0x1dca, 0x1031: 0x1dcf, 0x1032: 0x1dd4, 0x1033: 0x1dd9, 0x1034: 0x1de3, 0x1035: 0x1de8, + 0x1036: 0x1df2, 0x1037: 0x1df7, 0x1038: 0x1dfc, 0x1039: 0x1e01, 0x103a: 0x1e0b, 0x103b: 0x1e10, + 0x103c: 0x1f3c, 0x103d: 0x1f41, 0x103e: 0x1f50, 0x103f: 0x1f55, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f5a, 0x1041: 0x1f6e, 0x1042: 0x1f73, 0x1043: 0x1f78, 0x1044: 0x1f7d, 0x1045: 0x1f96, + 0x1046: 0x1fa0, 0x1047: 0x1fa5, 0x1048: 0x1faa, 0x1049: 0x1fbe, 0x104a: 0x1fdc, 0x104b: 0x1fe1, + 0x104c: 0x1fe6, 0x104d: 0x1feb, 0x104e: 0x1ff5, 0x104f: 0x1ffa, 0x1050: 0x445c, 0x1051: 0x2027, + 0x1052: 0x202c, 0x1053: 0x2031, 0x1054: 0x2036, 0x1055: 0x2040, 0x1056: 0x2045, 0x1057: 0x25b1, + 0x1058: 0x25b8, 0x1059: 0x25bf, 0x105a: 0x25d4, 0x105b: 0x25e2, 0x105c: 0x1d89, 0x105d: 0x1d8e, + 0x105e: 0x1d93, 0x105f: 0x1da2, 0x1060: 0x1dac, 0x1061: 0x1dbb, 0x1062: 0x1dc0, 0x1063: 0x1dc5, + 0x1064: 0x1dd4, 0x1065: 0x1dde, 0x1066: 0x1dfc, 0x1067: 0x1e15, 0x1068: 0x1e1a, 0x1069: 0x1e29, + 0x106a: 0x1e2e, 0x106b: 0x1e3d, 0x106c: 0x1e47, 0x106d: 0x1e56, 0x106e: 0x1e5b, 0x106f: 0x1e60, + 0x1070: 0x1e6a, 0x1071: 0x1ea6, 0x1072: 0x1eab, 0x1073: 0x1eb5, 0x1074: 0x1ec4, 0x1075: 0x1ec9, + 0x1076: 0x1ece, 0x1077: 0x1ed8, 0x1078: 0x1ee7, 0x1079: 0x1efb, 0x107a: 0x1f00, 0x107b: 0x1f05, + 0x107c: 0x1f14, 0x107d: 0x1f19, 0x107e: 0x1f28, 0x107f: 0x1f2d, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f32, 0x1081: 0x1f37, 0x1082: 0x1f46, 0x1083: 0x1f4b, 0x1084: 0x1f5f, 0x1085: 
0x1f64, + 0x1086: 0x1f69, 0x1087: 0x1f6e, 0x1088: 0x1f73, 0x1089: 0x1f87, 0x108a: 0x1f8c, 0x108b: 0x1f91, + 0x108c: 0x1f96, 0x108d: 0x1f9b, 0x108e: 0x1faf, 0x108f: 0x1fb4, 0x1090: 0x1fb9, 0x1091: 0x1fbe, + 0x1092: 0x1fcd, 0x1093: 0x1fd2, 0x1094: 0x1fd7, 0x1095: 0x1fe6, 0x1096: 0x1ff0, 0x1097: 0x1fff, + 0x1098: 0x2004, 0x1099: 0x4450, 0x109a: 0x2018, 0x109b: 0x201d, 0x109c: 0x2022, 0x109d: 0x2031, + 0x109e: 0x203b, 0x109f: 0x25d4, 0x10a0: 0x25e2, 0x10a1: 0x1da2, 0x10a2: 0x1dac, 0x10a3: 0x1dd4, + 0x10a4: 0x1dde, 0x10a5: 0x1dfc, 0x10a6: 0x1e06, 0x10a7: 0x1e6a, 0x10a8: 0x1e6f, 0x10a9: 0x1e92, + 0x10aa: 0x1e97, 0x10ab: 0x1f6e, 0x10ac: 0x1f73, 0x10ad: 0x1f96, 0x10ae: 0x1fe6, 0x10af: 0x1ff0, + 0x10b0: 0x2031, 0x10b1: 0x203b, 0x10b2: 0x4504, 0x10b3: 0x450c, 0x10b4: 0x4514, 0x10b5: 0x1ef1, + 0x10b6: 0x1ef6, 0x10b7: 0x1f0a, 0x10b8: 0x1f0f, 0x10b9: 0x1f1e, 0x10ba: 0x1f23, 0x10bb: 0x1e74, + 0x10bc: 0x1e79, 0x10bd: 0x1e9c, 0x10be: 0x1ea1, 0x10bf: 0x1e33, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e38, 0x10c1: 0x1e1f, 0x10c2: 0x1e24, 0x10c3: 0x1e4c, 0x10c4: 0x1e51, 0x10c5: 0x1eba, + 0x10c6: 0x1ebf, 0x10c7: 0x1edd, 0x10c8: 0x1ee2, 0x10c9: 0x1e7e, 0x10ca: 0x1e83, 0x10cb: 0x1e88, + 0x10cc: 0x1e92, 0x10cd: 0x1e8d, 0x10ce: 0x1e65, 0x10cf: 0x1eb0, 0x10d0: 0x1ed3, 0x10d1: 0x1ef1, + 0x10d2: 0x1ef6, 0x10d3: 0x1f0a, 0x10d4: 0x1f0f, 0x10d5: 0x1f1e, 0x10d6: 0x1f23, 0x10d7: 0x1e74, + 0x10d8: 0x1e79, 0x10d9: 0x1e9c, 0x10da: 0x1ea1, 0x10db: 0x1e33, 0x10dc: 0x1e38, 0x10dd: 0x1e1f, + 0x10de: 0x1e24, 0x10df: 0x1e4c, 0x10e0: 0x1e51, 0x10e1: 0x1eba, 0x10e2: 0x1ebf, 0x10e3: 0x1edd, + 0x10e4: 0x1ee2, 0x10e5: 0x1e7e, 0x10e6: 0x1e83, 0x10e7: 0x1e88, 0x10e8: 0x1e92, 0x10e9: 0x1e8d, + 0x10ea: 0x1e65, 0x10eb: 0x1eb0, 0x10ec: 0x1ed3, 0x10ed: 0x1e7e, 0x10ee: 0x1e83, 0x10ef: 0x1e88, + 0x10f0: 0x1e92, 0x10f1: 0x1e6f, 0x10f2: 0x1e97, 0x10f3: 0x1eec, 0x10f4: 0x1e56, 0x10f5: 0x1e5b, + 0x10f6: 0x1e60, 0x10f7: 0x1e7e, 0x10f8: 0x1e83, 0x10f9: 0x1e88, 0x10fa: 0x1eec, 0x10fb: 0x1efb, + 0x10fc: 0x4408, 0x10fd: 0x4408, + // Block 0x44, offset 0x1100 + 0x1110: 0x2311, 0x1111: 0x2326, + 0x1112: 0x2326, 0x1113: 0x232d, 0x1114: 0x2334, 0x1115: 0x2349, 0x1116: 0x2350, 0x1117: 0x2357, + 0x1118: 0x237a, 0x1119: 0x237a, 0x111a: 0x239d, 0x111b: 0x2396, 0x111c: 0x23b2, 0x111d: 0x23a4, + 0x111e: 0x23ab, 0x111f: 0x23ce, 0x1120: 0x23ce, 0x1121: 0x23c7, 0x1122: 0x23d5, 0x1123: 0x23d5, + 0x1124: 0x23ff, 0x1125: 0x23ff, 0x1126: 0x241b, 0x1127: 0x23e3, 0x1128: 0x23e3, 0x1129: 0x23dc, + 0x112a: 0x23f1, 0x112b: 0x23f1, 0x112c: 0x23f8, 0x112d: 0x23f8, 0x112e: 0x2422, 0x112f: 0x2430, + 0x1130: 0x2430, 0x1131: 0x2437, 0x1132: 0x2437, 0x1133: 0x243e, 0x1134: 0x2445, 0x1135: 0x244c, + 0x1136: 0x2453, 0x1137: 0x2453, 0x1138: 0x245a, 0x1139: 0x2468, 0x113a: 0x2476, 0x113b: 0x246f, + 0x113c: 0x247d, 0x113d: 0x247d, 0x113e: 0x2492, 0x113f: 0x2499, + // Block 0x45, offset 0x1140 + 0x1140: 0x24ca, 0x1141: 0x24d8, 0x1142: 0x24d1, 0x1143: 0x24b5, 0x1144: 0x24b5, 0x1145: 0x24df, + 0x1146: 0x24df, 0x1147: 0x24e6, 0x1148: 0x24e6, 0x1149: 0x2510, 0x114a: 0x2517, 0x114b: 0x251e, + 0x114c: 0x24f4, 0x114d: 0x2502, 0x114e: 0x2525, 0x114f: 0x252c, + 0x1152: 0x24fb, 0x1153: 0x2580, 0x1154: 0x2587, 0x1155: 0x255d, 0x1156: 0x2564, 0x1157: 0x2548, + 0x1158: 0x2548, 0x1159: 0x254f, 0x115a: 0x2579, 0x115b: 0x2572, 0x115c: 0x259c, 0x115d: 0x259c, + 0x115e: 0x230a, 0x115f: 0x231f, 0x1160: 0x2318, 0x1161: 0x2342, 0x1162: 0x233b, 0x1163: 0x2365, + 0x1164: 0x235e, 0x1165: 0x2388, 0x1166: 0x236c, 0x1167: 0x2381, 0x1168: 0x23b9, 0x1169: 0x2406, + 0x116a: 0x23ea, 0x116b: 0x2429, 0x116c: 0x24c3, 
0x116d: 0x24ed, 0x116e: 0x2595, 0x116f: 0x258e, + 0x1170: 0x25a3, 0x1171: 0x253a, 0x1172: 0x24a0, 0x1173: 0x256b, 0x1174: 0x2492, 0x1175: 0x24ca, + 0x1176: 0x2461, 0x1177: 0x24ae, 0x1178: 0x2541, 0x1179: 0x2533, 0x117a: 0x24bc, 0x117b: 0x24a7, + 0x117c: 0x24bc, 0x117d: 0x2541, 0x117e: 0x2373, 0x117f: 0x238f, + // Block 0x46, offset 0x1180 + 0x1180: 0x2509, 0x1181: 0x2484, 0x1182: 0x2303, 0x1183: 0x24a7, 0x1184: 0x244c, 0x1185: 0x241b, + 0x1186: 0x23c0, 0x1187: 0x2556, + 0x11b0: 0x2414, 0x11b1: 0x248b, 0x11b2: 0x27bf, 0x11b3: 0x27b6, 0x11b4: 0x27ec, 0x11b5: 0x27da, + 0x11b6: 0x27c8, 0x11b7: 0x27e3, 0x11b8: 0x27f5, 0x11b9: 0x240d, 0x11ba: 0x2c7c, 0x11bb: 0x2afc, + 0x11bc: 0x27d1, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf, + 0x11d8: 0x04c3, 0x11d9: 0x1b5c, + 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132, + 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d, + 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132, + 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab, + 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b, + // Block 0x48, offset 0x1200 + 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4269, 0x120a: 0x4269, 0x120b: 0x4269, + 0x120c: 0x4269, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7, + 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42aa, 0x1231: 0x442c, 0x1232: 0x42af, 0x1234: 0x42b4, + 0x1236: 0x42b9, 0x1237: 0x4432, 0x1238: 0x42be, 0x1239: 0x4438, 0x123a: 0x42c3, 0x123b: 0x443e, + 0x123c: 0x42c8, 0x123d: 0x4444, 0x123e: 0x42cd, 0x123f: 0x444a, + // Block 0x49, offset 0x1240 + 0x1240: 0x0236, 0x1241: 0x440e, 0x1242: 0x440e, 0x1243: 0x4414, 0x1244: 0x4414, 0x1245: 0x4456, + 0x1246: 0x4456, 0x1247: 0x441a, 0x1248: 0x441a, 0x1249: 0x4462, 0x124a: 0x4462, 0x124b: 0x4462, + 0x124c: 0x4462, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c, + 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242, + 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248, + 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b, + 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251, + 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a, + 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260, + 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263, + 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c, + 0x1286: 0x026c, 
0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f, + 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275, + 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278, + 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e, + 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281, + 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287, + 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d, + 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e00, + 0x12b6: 0x2e00, 0x12b7: 0x2e08, 0x12b8: 0x2e08, 0x12b9: 0x2e10, 0x12ba: 0x2e10, 0x12bb: 0x1f82, + 0x12bc: 0x1f82, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f, + 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7, + 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f, + 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb, + 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503, + 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f, + 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547, + 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f, + 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583, + 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7, + 0x131e: 0x4a78, 0x131f: 0x4a7e, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3b, + 0x1324: 0x031b, 0x1325: 0x4a41, 0x1326: 0x4a47, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327, + 0x132a: 0x4a4d, 0x132b: 0x4a53, 0x132c: 0x4a59, 0x132d: 0x4a5f, 0x132e: 0x4a65, 0x132f: 0x4a6b, + 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337, + 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f, + 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49bd, 0x1343: 0x49c3, 0x1344: 0x49c9, 0x1345: 0x49cf, + 0x1346: 0x49d5, 0x1347: 0x49db, 0x134a: 0x49e1, 0x134b: 0x49e7, + 0x134c: 0x49ed, 0x134d: 0x49f3, 0x134e: 0x49f9, 0x134f: 0x49ff, + 0x1352: 0x4a05, 0x1353: 0x4a0b, 0x1354: 0x4a11, 0x1355: 0x4a17, 0x1356: 0x4a1d, 0x1357: 0x4a23, + 0x135a: 0x4a29, 0x135b: 0x4a2f, 0x135c: 0x4a35, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4264, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b, + 0x136a: 
0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176, + 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188, + 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a, + 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9, + 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0, + 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4, + 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1442: 0x0248, + 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e, + 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4, + 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248, + 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 
0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + 0x147c: 0x0293, 0x147e: 0x02cc, + // Block 0x52, offset 0x1480 + 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a, + 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e, + 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263, + 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e, + 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272, + 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251, + 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290, + 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f, + 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242, + 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8, + 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927, + 0x14d0: 0x1a8c, 0x14d1: 0x1a90, + 0x14d2: 0x1a94, 0x14d3: 0x1a98, 0x14d4: 0x1a9c, 0x14d5: 0x1aa0, 0x14d6: 0x1aa4, 0x14d7: 0x1aa8, + 0x14d8: 0x1aac, 0x14d9: 0x1ab0, 0x14da: 0x1ab4, 0x14db: 0x1ab8, 0x14dc: 0x1abc, 0x14dd: 0x1ac0, + 0x14de: 0x1ac4, 0x14df: 0x1ac8, 0x14e0: 0x1acc, 0x14e1: 0x1ad0, 0x14e2: 0x1ad4, 0x14e3: 0x1ad8, + 0x14e4: 0x1adc, 0x14e5: 0x1ae0, 0x14e6: 0x1ae4, 0x14e7: 0x1ae8, 0x14e8: 0x1aec, 0x14e9: 0x1af0, + 0x14ea: 0x271e, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b1, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26ad, 0x1501: 0x26c2, 0x1502: 0x0503, + 0x1510: 0x0c0f, 0x1511: 0x0a47, + 0x1512: 0x08d3, 0x1513: 0x45c4, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff, + 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b, + 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b, + 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf, + 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b, + 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43, + 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757, + 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773, + 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3, + 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf, + 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff, + 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f, + 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867, + 0x156a: 0x086b, 0x156b: 
0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af, + 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93, + 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3, + 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917, + // Block 0x56, offset 0x1580 + 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f, + 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983, + 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf, + 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3, + 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef, + 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23, + 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37, + 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63, + 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f, + 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692, + 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb, + 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f, + 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6, + 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9, + 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83, + 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3, + 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf, + 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7, + 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f, + 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b, + 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87, + 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb, + 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7, + 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663, + 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd, + 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7, + 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b, + 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f, + 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7, + 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700, + 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 
0x163f: 0x0dfb, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23, + 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53, + 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714, + 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b, + 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719, + 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728, + 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37, + 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57, + 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737, + 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741, + 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff, + 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637, + 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f, + 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093, + 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782, + 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3, + 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7, + 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133, + 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa, + 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4, + 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7, + 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7, + 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b, + 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd, + 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f, + 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f, + 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273, + 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677, + 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7, + 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb, + 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5, + 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa, + 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 
0x134f, 0x1711: 0x136b, + 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7, + 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665, + 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f, + 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477, + 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693, + 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb, + 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b, + 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567, + 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7, + 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7, + 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef, + 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 0x21b: 
0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + 0x374: 0xcb, + 0x37d: 0xcc, + // Block 0xe, offset 0x380 + 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0, + 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4, + 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9, + 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc, + 0x3a0: 0xdd, + 0x3a8: 0xde, 0x3a9: 0xdf, 0x3aa: 0xe0, + 0x3b0: 0xda, 0x3b5: 0xe1, 0x3b6: 0xe2, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe3, 0x3ec: 0xe4, + // Block 0x10, offset 0x400 + 0x432: 
0xe5, + // Block 0x11, offset 0x440 + 0x445: 0xe6, 0x446: 0xe7, 0x447: 0xe8, + 0x449: 0xe9, + 0x450: 0xea, 0x451: 0xeb, 0x452: 0xec, 0x453: 0xed, 0x454: 0xee, 0x455: 0xef, 0x456: 0xf0, 0x457: 0xf1, + 0x458: 0xf2, 0x459: 0xf3, 0x45a: 0x4c, 0x45b: 0xf4, 0x45c: 0xf5, 0x45d: 0xf6, 0x45e: 0xf7, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xf8, + 0x4a3: 0xf9, 0x4a5: 0xfa, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0xfb, 0x4c6: 0xfc, + 0x4c8: 0x52, 0x4c9: 0xfd, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 162 entries, 324 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xde, 0xe2, 0xe8, 0xf9, 0x105, 0x107, 0x10d, 0x10f, 0x111, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11e, 0x121, 0x123, 0x126, 0x129, 0x12d, 0x132, 0x13b, 0x13d, 0x140, 0x142, 0x14d, 0x158, 0x166, 0x174, 0x184, 0x192, 0x199, 0x19f, 0x1ae, 0x1b2, 0x1b4, 0x1b8, 0x1ba, 0x1bd, 0x1bf, 0x1c2, 0x1c4, 0x1c7, 0x1c9, 0x1cb, 0x1cd, 0x1d9, 0x1e3, 0x1ed, 0x1f0, 0x1f4, 0x1f6, 0x1f8, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x203, 0x205, 0x207, 0x20d, 0x210, 0x214, 0x216, 0x21d, 0x223, 0x229, 0x231, 0x237, 0x23d, 0x243, 0x247, 0x249, 0x24b, 0x24d, 0x24f, 0x255, 0x258, 0x25a, 0x260, 0x263, 0x26b, 0x272, 0x275, 0x278, 0x27a, 0x27d, 0x285, 0x289, 0x290, 0x293, 0x299, 0x29b, 0x29d, 0x2a0, 0x2a2, 0x2a5, 0x2a7, 0x2a9, 0x2ab, 0x2ae, 0x2b0, 0x2b2, 0x2b4, 0x2b6, 0x2c3, 0x2cd, 0x2cf, 0x2d1, 0x2d5, 0x2da, 0x2e6, 0x2eb, 0x2f4, 0x2fa, 0x2ff, 0x303, 0x308, 0x30c, 0x31c, 0x32a, 0x338, 0x346, 0x34c, 0x34e, 0x351, 0x35b, 0x35d} + +// nfkcSparseValues: 871 entries, 3484 bytes +var nfkcSparseValues = [871]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x4278, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4264, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425a, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4291, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221c, lo: 0xbc, hi: 0xbc}, + {value: 0x2210, lo: 0xbd, hi: 0xbd}, + {value: 0x22b2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x426e, lo: 0x98, hi: 0x98}, + {value: 0x4273, lo: 0x99, hi: 0x9a}, + {value: 0x4296, lo: 0x9b, hi: 0x9b}, + {value: 0x425f, lo: 0x9c, hi: 0x9c}, + {value: 0x4282, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + 
{value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + 
{value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + 
{value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x22, offset 0xde + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe2 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xf9 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x107 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x10f + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x111 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11e + {value: 0x0000, lo: 0x02}, + {value: 
0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x121 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x123 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x126 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x129 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12d + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x132 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13d + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x140 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x142 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14d + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x158 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x166 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, 
hi: 0xbf}, + // Block 0x3e, offset 0x174 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x184 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x192 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x199 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x19f + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1ae + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b2 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b4 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1ba + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1bd + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1bf + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 
0x1c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1c9 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1cd + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1d9 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e3 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ed + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f0 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f6 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f8 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x201 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x203 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x205 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x207 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x210 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x216 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, 
lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x247 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x249 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x24f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x255 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x258 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x263 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 
0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x275 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x278 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x285 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x289 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x290 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x87, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x88, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x89, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8a, offset 0x2b6 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 
0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x2c3 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8c, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x8d, offset 0x2cf + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8e, offset 0x2d1 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8f, offset 0x2d5 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x90, offset 0x2da + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x91, offset 0x2e6 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x92, offset 0x2eb + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x93, offset 0x2f4 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x94, offset 0x2fa + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x95, offset 0x2ff + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x96, offset 0x303 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x308 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x30c + {value: 
0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x99, offset 0x31c + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x32a + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9b, offset 0x338 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x346 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9d, offset 0x34c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9e, offset 0x34e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9f, offset 0x351 + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0xa0, offset 0x35b + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa1, offset 0x35d + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + 
{value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + 
"\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54514 bytes) diff --git a/cluster-autoscaler/vendor/golang.org/x/text/width/kind_string.go b/cluster-autoscaler/vendor/golang.org/x/text/width/kind_string.go index edbdc54bc1e..dd3febd43b2 100644 --- a/cluster-autoscaler/vendor/golang.org/x/text/width/kind_string.go +++ b/cluster-autoscaler/vendor/golang.org/x/text/width/kind_string.go @@ -4,6 +4,18 @@ package width import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Neutral-0] + _ = x[EastAsianAmbiguous-1] + _ = x[EastAsianWide-2] + _ = x[EastAsianNarrow-3] + _ = x[EastAsianFullwidth-4] + _ = x[EastAsianHalfwidth-5] +} + const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} diff --git a/cluster-autoscaler/vendor/golang.org/x/text/width/tables10.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/width/tables10.0.0.go index f4988626731..decb8e48093 100644 --- a/cluster-autoscaler/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/cluster-autoscaler/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package width diff --git a/cluster-autoscaler/vendor/golang.org/x/text/width/tables11.0.0.go b/cluster-autoscaler/vendor/golang.org/x/text/width/tables11.0.0.go new file mode 100644 index 00000000000..d6def0e7be5 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/text/width/tables11.0.0.go @@ -0,0 +1,1330 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 
0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 
0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 
0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 
0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 
0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x187a: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 
0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 
0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 
0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 
0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 
0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/cluster-autoscaler/vendor/golang.org/x/time/rate/rate.go b/cluster-autoscaler/vendor/golang.org/x/time/rate/rate.go index 938feaffe9b..ae93e247197 100644 --- a/cluster-autoscaler/vendor/golang.org/x/time/rate/rate.go +++ b/cluster-autoscaler/vendor/golang.org/x/time/rate/rate.go @@ -6,12 +6,11 @@ package rate import ( + "context" "fmt" "math" "sync" "time" - - "golang.org/x/net/context" ) // Limit defines the maximum frequency of some events. @@ -244,8 +243,12 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } - // Wait - t := time.NewTimer(r.DelayFrom(now)) + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) defer t.Stop() select { case <-t.C: diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go deleted file mode 100644 index 9521b50e9e8..00000000000 --- a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/api/annotations.proto - -package annotations // import "google.golang.org/genproto/googleapis/api/annotations" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_Http = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", -} - -func init() { - proto.RegisterExtension(E_Http) -} - -func init() { - proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d) -} - -var fileDescriptor_annotations_55609bb51d80951d = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, - 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, - 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go deleted file mode 100644 index d64b32280f0..00000000000 --- a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/client.proto - -package annotations // import "google.golang.org/genproto/googleapis/api/annotations" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_MethodSignature = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MethodOptions)(nil), - ExtensionType: ([]string)(nil), - Field: 1051, - Name: "google.api.method_signature", - Tag: "bytes,1051,rep,name=method_signature,json=methodSignature", - Filename: "google/api/client.proto", -} - -var E_DefaultHost = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1049, - Name: "google.api.default_host", - Tag: "bytes,1049,opt,name=default_host,json=defaultHost", - Filename: "google/api/client.proto", -} - -var E_OauthScopes = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1050, - Name: "google.api.oauth_scopes", - Tag: "bytes,1050,opt,name=oauth_scopes,json=oauthScopes", - Filename: "google/api/client.proto", -} - -func init() { - proto.RegisterExtension(E_MethodSignature) - proto.RegisterExtension(E_DefaultHost) - proto.RegisterExtension(E_OauthScopes) -} - -func init() { proto.RegisterFile("google/api/client.proto", fileDescriptor_client_1608614df476619f) } - -var fileDescriptor_client_1608614df476619f = []byte{ - // 262 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30, - 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8, - 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5, - 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd, - 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b, - 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b, - 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x4f, 0xec, 0x7c, 0x07, 0xd4, 0x60, 0x5d, - 0x05, 0xa3, 0xad, 0xa4, 0xce, 0x43, 0x76, 0xcd, 0x93, 0x62, 0xc0, 0xf8, 0x73, 0xac, 0xbc, 0x38, - 0x32, 0x68, 0xc3, 0xe5, 0xe7, 0x38, 0x3f, 0x9c, 0x4d, 0xd6, 0x67, 0x3d, 0x58, 0x0e, 0xdc, 0x7c, - 0xc5, 0x4e, 0x6a, 0xd8, 0xca, 0xae, 0xa5, 0xaa, 0xc1, 0x40, 0xd9, 0xcd, 0x1f, 0x4f, 0x09, 0xfe, - 0xcd, 0x28, 0x18, 0x44, 0xef, 0xe3, 0x7c, 0x34, 0x9b, 0xac, 0xa7, 0x89, 0x7a, 0xc0, 0x40, 0x7b, - 0x09, 0xca, 0x8e, 0x9a, 0x2a, 0x28, 0x74, 0x10, 0xfe, 0x97, 0x7c, 0x24, 0x49, 0xa4, 0xca, 0x08, - 0x2d, 0x0d, 0x3b, 0x55, 0xb8, 0xe3, 0x3f, 0x4b, 0x2c, 0xa7, 0xab, 0xb8, 0x51, 0xb1, 0x97, 0x14, - 0xa3, 0xd7, 0x45, 0x8a, 0x34, 0xb6, 0xd2, 0x6a, 0x8e, 0x5e, 0x0b, 0x0d, 0x36, 0xbe, 0x10, 0x7d, - 0x24, 0x9d, 0x09, 0x71, 0x5c, 0x69, 0x2d, 0x92, 0x8c, 0xbf, 0xee, 0x7e, 0xdd, 0x5f, 0x07, 0x47, - 0xf7, 0x8b, 0xe2, 0x71, 0x73, 0x1c, 0xa1, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xc2, - 0xcf, 0x71, 0x90, 0x01, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go deleted file mode 100644 index 9a9ab1242fe..00000000000 --- 
a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/field_behavior.proto - -package annotations // import "google.golang.org/genproto/googleapis/api/annotations" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -type FieldBehavior int32 - -const ( - // Conventional default for enums. Do not use this. - FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - FieldBehavior_OPTIONAL FieldBehavior = 1 - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - FieldBehavior_REQUIRED FieldBehavior = 2 - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - FieldBehavior_INPUT_ONLY FieldBehavior = 4 - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. 
- FieldBehavior_IMMUTABLE FieldBehavior = 5 -) - -var FieldBehavior_name = map[int32]string{ - 0: "FIELD_BEHAVIOR_UNSPECIFIED", - 1: "OPTIONAL", - 2: "REQUIRED", - 3: "OUTPUT_ONLY", - 4: "INPUT_ONLY", - 5: "IMMUTABLE", -} -var FieldBehavior_value = map[string]int32{ - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, -} - -func (x FieldBehavior) String() string { - return proto.EnumName(FieldBehavior_name, int32(x)) -} -func (FieldBehavior) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_field_behavior_ddf5c982f789c6a3, []int{0} -} - -var E_FieldBehavior = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: ([]FieldBehavior)(nil), - Field: 1052, - Name: "google.api.field_behavior", - Tag: "varint,1052,rep,name=field_behavior,json=fieldBehavior,enum=google.api.FieldBehavior", - Filename: "google/api/field_behavior.proto", -} - -func init() { - proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) - proto.RegisterExtension(E_FieldBehavior) -} - -func init() { - proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_field_behavior_ddf5c982f789c6a3) -} - -var fileDescriptor_field_behavior_ddf5c982f789c6a3 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 0xb3, 0x30, - 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28, - 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda, - 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9, - 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99, - 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00, - 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea, - 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09, - 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53, - 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14, - 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1, - 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd, - 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd, - 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17, - 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6, - 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02, - 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb, - 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5, - 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go deleted file mode 100644 index ca20ad3d615..00000000000 --- a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go 
+++ /dev/null @@ -1,745 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/http.proto - -package annotations // import "google.golang.org/genproto/googleapis/api/annotations" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Http) Reset() { *m = Http{} } -func (m *Http) String() string { return proto.CompactTextString(m) } -func (*Http) ProtoMessage() {} -func (*Http) Descriptor() ([]byte, []int) { - return fileDescriptor_http_5af6bbacbb935ee3, []int{0} -} -func (m *Http) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Http.Unmarshal(m, b) -} -func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Http.Marshal(b, m, deterministic) -} -func (dst *Http) XXX_Merge(src proto.Message) { - xxx_messageInfo_Http.Merge(dst, src) -} -func (m *Http) XXX_Size() int { - return xxx_messageInfo_Http.Size(m) -} -func (m *Http) XXX_DiscardUnknown() { - xxx_messageInfo_Http.DiscardUnknown(m) -} - -var xxx_messageInfo_Http proto.InternalMessageInfo - -func (m *Http) GetRules() []*HttpRule { - if m != nil { - return m.Rules - } - return nil -} - -func (m *Http) GetFullyDecodeReservedExpansion() bool { - if m != nil { - return m.FullyDecodeReservedExpansion - } - return false -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. 
The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A&param=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. 
This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. 
-// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -type HttpRule struct { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. 
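As an aside to the transcoding documentation above: the generated Go types in this (now deleted) file can express the same mappings directly. A minimal sketch, not part of this patch, assuming the annotations package remains importable from its upstream path even though the vendored copy is removed here; it builds the documented `GET /v1/messages/{message_id}` rule plus the user-scoped additional binding, and performs no transcoding itself.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Sketch only: the mapping described in the comment above, expressed with
	// the generated HttpRule / HttpRule_Get types shown in the deleted http.pb.go.
	rule := &annotations.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*annotations.HttpRule{
			{Pattern: &annotations.HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"}},
		},
	}
	fmt.Println(rule.GetGet(), len(rule.GetAdditionalBindings()))
}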
- Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - // - // Types that are valid to be assigned to Pattern: - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { - return fileDescriptor_http_5af6bbacbb935ee3, []int{1} -} -func (m *HttpRule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HttpRule.Unmarshal(m, b) -} -func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) -} -func (dst *HttpRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_HttpRule.Merge(dst, src) -} -func (m *HttpRule) XXX_Size() int { - return xxx_messageInfo_HttpRule.Size(m) -} -func (m *HttpRule) XXX_DiscardUnknown() { - xxx_messageInfo_HttpRule.DiscardUnknown(m) -} - -var xxx_messageInfo_HttpRule proto.InternalMessageInfo - -func (m *HttpRule) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} - -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` -} - -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` -} - -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) 
isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (m *HttpRule) GetBody() string { - if m != nil { - return m.Body - } - return "" -} - -func (m *HttpRule) GetResponseBody() string { - if m != nil { - return m.ResponseBody - } - return "" -} - -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } -} - -func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Get) - case *HttpRule_Put: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Put) - case *HttpRule_Post: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Post) - case *HttpRule_Delete: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Delete) - case *HttpRule_Patch: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Patch) - case *HttpRule_Custom: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Custom); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) - } - return nil -} - -func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*HttpRule) - switch tag { - case 2: // pattern.get - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Get{x} - return true, err - case 3: // pattern.put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Put{x} - return true, err - case 4: // pattern.post - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Post{x} - return true, err - case 5: // pattern.delete - if wire != 
proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Delete{x} - return true, err - case 6: // pattern.patch - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Patch{x} - return true, err - case 8: // pattern.custom - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(CustomHttpPattern) - err := b.DecodeMessage(msg) - m.Pattern = &HttpRule_Custom{msg} - return true, err - default: - return false, nil - } -} - -func _HttpRule_OneofSizer(msg proto.Message) (n int) { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Get))) - n += len(x.Get) - case *HttpRule_Put: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Put))) - n += len(x.Put) - case *HttpRule_Post: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Post))) - n += len(x.Post) - case *HttpRule_Delete: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Delete))) - n += len(x.Delete) - case *HttpRule_Patch: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Patch))) - n += len(x.Patch) - case *HttpRule_Custom: - s := proto.Size(x.Custom) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return fileDescriptor_http_5af6bbacbb935ee3, []int{2} -} -func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) -} -func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) -} -func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomHttpPattern.Merge(dst, src) -} -func (m *CustomHttpPattern) XXX_Size() int { - return xxx_messageInfo_CustomHttpPattern.Size(m) -} -func (m *CustomHttpPattern) XXX_DiscardUnknown() { - xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo - -func (m *CustomHttpPattern) GetKind() string { - if m != nil { - return m.Kind - } - return "" -} - -func (m *CustomHttpPattern) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func init() { - proto.RegisterType((*Http)(nil), "google.api.Http") - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_5af6bbacbb935ee3) } - -var fileDescriptor_http_5af6bbacbb935ee3 = []byte{ - // 419 bytes 
of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, - 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37, - 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, - 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, - 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, - 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, - 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, - 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, - 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, - 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, - 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, - 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, - 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, - 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55, - 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, - 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, - 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, - 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, - 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, - 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, - 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, - 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, - 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, - 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, - 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, - 0x02, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go deleted file mode 100644 index 036ae3e16bb..00000000000 --- a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ /dev/null @@ -1,321 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/resource.proto - -package annotations // import "google.golang.org/genproto/googleapis/api/annotations" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A description of the historical or future-looking state of the -// resource pattern. -type ResourceDescriptor_History int32 - -const ( - // The "unset" value. - ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. - ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 - // The resource has one pattern, but the API owner expects to add more - // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 -) - -var ResourceDescriptor_History_name = map[int32]string{ - 0: "HISTORY_UNSPECIFIED", - 1: "ORIGINALLY_SINGLE_PATTERN", - 2: "FUTURE_MULTI_PATTERN", -} -var ResourceDescriptor_History_value = map[string]int32{ - "HISTORY_UNSPECIFIED": 0, - "ORIGINALLY_SINGLE_PATTERN": 1, - "FUTURE_MULTI_PATTERN": 2, -} - -func (x ResourceDescriptor_History) String() string { - return proto.EnumName(ResourceDescriptor_History_name, int32(x)) -} -func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_resource_1953877c7bf00bf4, []int{0, 0} -} - -// A simple descriptor of a resource type. -// -// ResourceDescriptor annotates a resource message (either by means of a -// protobuf annotation or use in the service config), and associates the -// resource's schema, the resource type, and the pattern of the resource name. -// -// Example: -// -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } -type ResourceDescriptor struct { - // The full name of the resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The resource type names are - // singular and do not contain version numbers. - // - // For example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Z][a-zA-Z0-9]+/. It must start with upper case character and - // recommended to use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the resource_type_kind is 100. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Required. The valid pattern or patterns for this resource's names. 
- // - // Examples: - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. - Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceDescriptor) Reset() { *m = ResourceDescriptor{} } -func (m *ResourceDescriptor) String() string { return proto.CompactTextString(m) } -func (*ResourceDescriptor) ProtoMessage() {} -func (*ResourceDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_resource_1953877c7bf00bf4, []int{0} -} -func (m *ResourceDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceDescriptor.Unmarshal(m, b) -} -func (m *ResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceDescriptor.Marshal(b, m, deterministic) -} -func (dst *ResourceDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceDescriptor.Merge(dst, src) -} -func (m *ResourceDescriptor) XXX_Size() int { - return xxx_messageInfo_ResourceDescriptor.Size(m) -} -func (m *ResourceDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceDescriptor proto.InternalMessageInfo - -func (m *ResourceDescriptor) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ResourceDescriptor) GetPattern() []string { - if m != nil { - return m.Pattern - } - return nil -} - -func (m *ResourceDescriptor) GetNameField() string { - if m != nil { - return m.NameField - } - return "" -} - -func (m *ResourceDescriptor) GetHistory() ResourceDescriptor_History { - if m != nil { - return m.History - } - return ResourceDescriptor_HISTORY_UNSPECIFIED -} - -// An annotation designating that this field is a reference to a resource -// defined by another message. -type ResourceReference struct { - // The unified resource type name of the type that this field references. - // Marks this as a field referring to a resource in another message. 
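Similarly, for the ResourceDescriptor comment above, a minimal sketch (not part of this patch, same upstream-import assumption as before) that builds the documented `pubsub.googleapis.com/Topic` descriptor with the generated type; the history value is set only to exercise the enum.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Sketch only: the Topic example from the doc comment above.
	topic := &annotations.ResourceDescriptor{
		Type:    "pubsub.googleapis.com/Topic",
		Pattern: []string{"projects/{project}/topics/{topic}"},
		// Illustrative choice; the documented example does not set history.
		History: annotations.ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN,
	}
	fmt.Println(topic.GetType(), topic.GetPattern(), topic.GetHistory())
}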
- // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type = "pubsub.googleapis.com/Topic" - // }]; - // } - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The fully-qualified message name of a child of the type that this field - // references. - // - // This is useful for `parent` fields where a resource has more than one - // possible type of parent. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - // - // If the referenced message is in the same proto package, the service name - // may be omitted: - // - // message ListLogEntriesRequest { - // string parent = 1 - // [(google.api.resource_reference).child_type = "LogEntry"]; - // } - ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceReference) Reset() { *m = ResourceReference{} } -func (m *ResourceReference) String() string { return proto.CompactTextString(m) } -func (*ResourceReference) ProtoMessage() {} -func (*ResourceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_resource_1953877c7bf00bf4, []int{1} -} -func (m *ResourceReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceReference.Unmarshal(m, b) -} -func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic) -} -func (dst *ResourceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceReference.Merge(dst, src) -} -func (m *ResourceReference) XXX_Size() int { - return xxx_messageInfo_ResourceReference.Size(m) -} -func (m *ResourceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceReference proto.InternalMessageInfo - -func (m *ResourceReference) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ResourceReference) GetChildType() string { - if m != nil { - return m.ChildType - } - return "" -} - -var E_ResourceReference = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*ResourceReference)(nil), - Field: 1055, - Name: "google.api.resource_reference", - Tag: "bytes,1055,opt,name=resource_reference,json=resourceReference", - Filename: "google/api/resource.proto", -} - -var E_Resource = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource", - Tag: "bytes,1053,opt,name=resource", - Filename: "google/api/resource.proto", -} - -func init() { - proto.RegisterType((*ResourceDescriptor)(nil), "google.api.ResourceDescriptor") - proto.RegisterType((*ResourceReference)(nil), "google.api.ResourceReference") - proto.RegisterEnum("google.api.ResourceDescriptor_History", ResourceDescriptor_History_name, ResourceDescriptor_History_value) - proto.RegisterExtension(E_ResourceReference) - proto.RegisterExtension(E_Resource) -} - -func init() { proto.RegisterFile("google/api/resource.proto", fileDescriptor_resource_1953877c7bf00bf4) } - -var fileDescriptor_resource_1953877c7bf00bf4 = []byte{ - // 430 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0x7c, 0x52, 0x41, 0x6f, 0xd3, 0x30, - 0x18, 0x25, 0x59, 0x45, 0xd7, 0x0f, 0x31, 0x6d, 0x06, 0x89, 0x0c, 0x29, 0x10, 0xf5, 0x80, 0x7a, - 0x4a, 0xa4, 0x71, 0x1b, 0x17, 0x3a, 0x96, 0x76, 0x91, 0xba, 0x36, 0x72, 0xd3, 0xc3, 0x00, 0x29, - 0xf2, 0xd2, 0xaf, 0x59, 0xa4, 0xcc, 0xb6, 0x9c, 0xec, 0xd0, 0x1b, 0x7f, 0x04, 0x21, 0xf1, 0x2b, - 0x39, 0xa2, 0x3a, 0x71, 0x98, 0xd8, 0xb4, 0x9b, 0xf3, 0xde, 0xfb, 0xbe, 0xf7, 0xfc, 0x1c, 0x38, - 0xce, 0x85, 0xc8, 0x4b, 0x0c, 0x98, 0x2c, 0x02, 0x85, 0x95, 0xb8, 0x53, 0x19, 0xfa, 0x52, 0x89, - 0x5a, 0x10, 0x68, 0x28, 0x9f, 0xc9, 0xe2, 0xad, 0xd7, 0xca, 0x34, 0x73, 0x7d, 0xb7, 0x09, 0xd6, - 0x58, 0x65, 0xaa, 0x90, 0xb5, 0x50, 0x8d, 0x7a, 0xf8, 0xc3, 0x06, 0x42, 0xdb, 0x05, 0xe7, 0x1d, - 0x49, 0x08, 0xf4, 0xea, 0xad, 0x44, 0xc7, 0xf2, 0xac, 0xd1, 0x80, 0xea, 0x33, 0x71, 0xa0, 0x2f, - 0x59, 0x5d, 0xa3, 0xe2, 0x8e, 0xed, 0xed, 0x8d, 0x06, 0xd4, 0x7c, 0x12, 0x17, 0x80, 0xb3, 0x5b, - 0x4c, 0x37, 0x05, 0x96, 0x6b, 0x67, 0x4f, 0xcf, 0x0c, 0x76, 0xc8, 0x64, 0x07, 0x90, 0xcf, 0xd0, - 0xbf, 0x29, 0xaa, 0x5a, 0xa8, 0xad, 0xd3, 0xf3, 0xac, 0xd1, 0xc1, 0xc9, 0x07, 0xff, 0x5f, 0x46, - 0xff, 0xa1, 0xbb, 0x7f, 0xd1, 0xa8, 0xa9, 0x19, 0x1b, 0x7e, 0x83, 0x7e, 0x8b, 0x91, 0x37, 0xf0, - 0xea, 0x22, 0x5a, 0x26, 0x0b, 0x7a, 0x95, 0xae, 0xe6, 0xcb, 0x38, 0xfc, 0x12, 0x4d, 0xa2, 0xf0, - 0xfc, 0xf0, 0x19, 0x71, 0xe1, 0x78, 0x41, 0xa3, 0x69, 0x34, 0x1f, 0xcf, 0x66, 0x57, 0xe9, 0x32, - 0x9a, 0x4f, 0x67, 0x61, 0x1a, 0x8f, 0x93, 0x24, 0xa4, 0xf3, 0x43, 0x8b, 0x38, 0xf0, 0x7a, 0xb2, - 0x4a, 0x56, 0x34, 0x4c, 0x2f, 0x57, 0xb3, 0x24, 0xea, 0x18, 0x7b, 0x38, 0x81, 0x23, 0x93, 0x81, - 0xe2, 0x06, 0x15, 0xf2, 0x0c, 0x1f, 0x2d, 0xc0, 0x05, 0xc8, 0x6e, 0x8a, 0x72, 0x9d, 0x6a, 0xc6, - 0x6e, 0xae, 0xa9, 0x91, 0x64, 0x2b, 0xf1, 0xb4, 0x04, 0x62, 0x9e, 0x22, 0x55, 0xdd, 0x22, 0xd7, - 0xdc, 0xd5, 0xbc, 0x81, 0xaf, 0x4b, 0x59, 0xc8, 0xba, 0x10, 0xbc, 0x72, 0x7e, 0xed, 0x7b, 0xd6, - 0xe8, 0xc5, 0x89, 0xfb, 0x58, 0x23, 0x5d, 0x1a, 0x7a, 0xa4, 0xfe, 0x87, 0x4e, 0xbf, 0xc3, 0xbe, - 0x01, 0xc9, 0xfb, 0x07, 0x1e, 0x97, 0x58, 0x55, 0x2c, 0x47, 0xe3, 0xf2, 0xb3, 0x71, 0x79, 0xf7, - 0x74, 0xef, 0xb4, 0xdb, 0x78, 0xc6, 0xe1, 0x20, 0x13, 0xb7, 0xf7, 0xe4, 0x67, 0x2f, 0x8d, 0x3e, - 0xde, 0x79, 0xc4, 0xd6, 0xd7, 0x71, 0x4b, 0xe6, 0xa2, 0x64, 0x3c, 0xf7, 0x85, 0xca, 0x83, 0x1c, - 0xb9, 0x4e, 0x10, 0x34, 0x14, 0x93, 0x45, 0xa5, 0xff, 0x50, 0xc6, 0xb9, 0xa8, 0x99, 0x8e, 0xf2, - 0xe9, 0xde, 0xf9, 0x8f, 0x65, 0xfd, 0xb6, 0x7b, 0xd3, 0x71, 0x1c, 0x5d, 0x3f, 0xd7, 0x73, 0x1f, - 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x1e, 0x07, 0x80, 0xd8, 0x02, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83439b5627d..4e26f6a1d6b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -138,7 +138,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. - var isettings []http2.Setting + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. 
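Taken together with the newFramer change in http_util.go in the next hunk, the hunk above pins gRPC's HTTP/2 frame size in both directions: the server now advertises SettingMaxFrameSize in its initial SETTINGS frame, and the framer's read side is capped with SetMaxReadFrameSize. A hedged, standalone sketch of those two golang.org/x/net/http2 calls outside of gRPC; http2MaxFrameLen is gRPC's internal 16 KB constant, hard-coded here because it is unexported.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

// http2MaxFrameLen mirrors gRPC's internal 16 KB frame-size constant (assumption:
// hard-coded here because the real constant lives unexported in the transport package).
const http2MaxFrameLen = 16384

func main() {
	var buf bytes.Buffer

	// Writer side: advertise SettingMaxFrameSize in a SETTINGS frame,
	// as the http2_server.go hunk above now does for the connection preface.
	wf := http2.NewFramer(&buf, nil)
	if err := wf.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen}); err != nil {
		panic(err)
	}

	// Reader side: cap the frame size we are willing to read,
	// as the http_util.go hunk below does with SetMaxReadFrameSize.
	rf := http2.NewFramer(nil, &buf)
	rf.SetMaxReadFrameSize(http2MaxFrameLen)
	frame, err := rf.ReadFrame()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %T advertising max frame size %d\n", frame, http2MaxFrameLen)
}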
maxStreams := config.MaxStreams diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go index 9d212867ce2..8f5f3349d90 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -667,6 +667,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList writer: w, fr: http2.NewFramer(w, r), } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/version.go b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go index 5411a73a22e..58885056385 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/version.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.23.0" +const Version = "1.23.1" diff --git a/cluster-autoscaler/vendor/gopkg.in/inf.v0/dec.go b/cluster-autoscaler/vendor/gopkg.in/inf.v0/dec.go index 3b4afedf1a3..26548b63cef 100644 --- a/cluster-autoscaler/vendor/gopkg.in/inf.v0/dec.go +++ b/cluster-autoscaler/vendor/gopkg.in/inf.v0/dec.go @@ -104,7 +104,7 @@ var bigInt = [...]*big.Int{ var exp10cache [64]big.Int = func() [64]big.Int { e10, e10i := [64]big.Int{}, bigInt[1] - for i, _ := range e10 { + for i := range e10 { e10[i].Set(e10i) e10i = new(big.Int).Mul(e10i, bigInt[10]) } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go index e4e56e28e0e..53108765555 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db6fbd..4120e0c9160 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1dd2d4..570b8ecd10f 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go @@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. @@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. 
token := yaml_token_t{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 17de1a587aa..086cbcc7986 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -131,7 +131,7 @@ message MutatingWebhook { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -385,7 +385,7 @@ message ValidatingWebhook { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index cf065a286c6..37a993e3e5a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -275,7 +275,7 @@ type ValidatingWebhook struct { // +optional ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -407,7 +407,7 @@ type MutatingWebhook struct { // +optional ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. 
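Returning to the gopkg.in/yaml.v2 hunks above: they add three defensive limits. Decodes driven by alias expansion are capped by allowedAliasRatio (for instance, at the midpoint decodeCount of 2,200,000 the allowed ratio is 0.99 - 0.89*0.5 ≈ 0.545), and the scanner now rejects more than 10000 nested flow levels or indents. A hedged sketch, not part of the patch, showing that a small "billion laughs"-style document now fails fast under yaml.Unmarshal instead of exhausting memory.

package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Build a classic alias-expansion bomb: each level aliases the previous
	// one nine times, so full expansion would be exponential in size.
	names := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
	lines := []string{`a: &a ["x","x","x","x","x","x","x","x","x"]`}
	for k := 1; k < len(names); k++ {
		refs := strings.TrimSuffix(strings.Repeat("*"+names[k-1]+",", 9), ",")
		lines = append(lines, fmt.Sprintf("%s: &%s [%s]", names[k], names[k], refs))
	}

	var out map[string]interface{}
	err := yaml.Unmarshal([]byte(strings.Join(lines, "\n")), &out)

	// With the alias-ratio check added in decode.go above, this is expected to
	// return an error such as "yaml: document contains excessive aliasing".
	fmt.Println(err)
}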
diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index f40a15d50cb..d9fb5af8fcb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -36,7 +36,7 @@ var map_MutatingWebhook = map[string]string{ "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. 
Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", @@ -108,7 +108,7 @@ var map_ValidatingWebhook = map[string]string{ "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. 
API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types.go index 519bd786940..55b2a0d6b68 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types.go @@ -151,7 +151,7 @@ type ScaleStatus struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -322,7 +322,7 @@ type MetricStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 92fe5a2b156..53a53a3a9c7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -62,7 +62,7 @@ type HorizontalPodAutoscalerSpec struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -231,7 +231,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 314d70950c5..4480c7da8d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -120,7 +120,7 @@ type MetricSpec struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -221,7 +221,7 @@ type MetricTarget struct { // "Value", "AverageValue", or "Utilization" type MetricTargetType string -var ( +const ( // UtilizationMetricType declares a MetricTarget is an AverageUtilization value UtilizationMetricType MetricTargetType = "Utilization" // ValueMetricType declares a MetricTarget is a raw value @@ -262,7 +262,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. 
type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go index bb9e82d3082..93f81cd529b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -129,27 +129,27 @@ type CertificateSigningRequestList struct { type KeyUsage string const ( - UsageSigning KeyUsage = "signing" - UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommittment KeyUsage = "content commitment" - UsageKeyEncipherment KeyUsage = "key encipherment" - UsageKeyAgreement KeyUsage = "key agreement" - UsageDataEncipherment KeyUsage = "data encipherment" - UsageCertSign KeyUsage = "cert sign" - UsageCRLSign KeyUsage = "crl sign" - UsageEncipherOnly KeyUsage = "encipher only" - UsageDecipherOnly KeyUsage = "decipher only" - UsageAny KeyUsage = "any" - UsageServerAuth KeyUsage = "server auth" - UsageClientAuth KeyUsage = "client auth" - UsageCodeSigning KeyUsage = "code signing" - UsageEmailProtection KeyUsage = "email protection" - UsageSMIME KeyUsage = "s/mime" - UsageIPsecEndSystem KeyUsage = "ipsec end system" - UsageIPsecTunnel KeyUsage = "ipsec tunnel" - UsageIPsecUser KeyUsage = "ipsec user" - UsageTimestamping KeyUsage = "timestamping" - UsageOCSPSigning KeyUsage = "ocsp signing" - UsageMicrosoftSGC KeyUsage = "microsoft sgc" - UsageNetscapSGC KeyUsage = "netscape sgc" + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommitment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapeSGC KeyUsage = "netscape sgc" ) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/core/v1/BUILD index ee4450562a0..51b5b284813 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/BUILD @@ -29,6 +29,7 @@ go_library( "types.go", "types_swagger_doc_generated.go", "well_known_labels.go", + "well_known_taints.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/api/core/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go index 8f788035e62..732385ce92a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go +++ 
b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go @@ -6000,855 +6000,859 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 13567 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, - 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x9d, 0xd1, 0x6c, - 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, - 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb2, 0x77, 0x5c, 0xa9, 0x2b, 0x25, 0xd5, - 0xaa, 0xbb, 0xaa, 0xb7, 0xaa, 0x5a, 0x33, 0xda, 0x1f, 0xc4, 0x0f, 0x1f, 0xcf, 0x33, 0xe0, 0xb8, - 0xb0, 0x09, 0x3f, 0x80, 0xc0, 0x11, 0x18, 0x07, 0x60, 0xb0, 0xc3, 0x18, 0x0c, 0x98, 0xc3, 0x36, - 0x06, 0xdb, 0x81, 0xfd, 0x07, 0xc6, 0x0e, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, - 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, - 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7e, 0xf9, 0x3d, 0xe1, - 0xd5, 0xdd, 0x57, 0xa2, 0x05, 0x2f, 0xb8, 0xb2, 0xdb, 0xd9, 0x24, 0xa1, 0x4f, 0x62, 0x12, 0x5d, - 0xd9, 0x23, 0xbe, 0x1b, 0x84, 0x57, 0x04, 0xc0, 0x69, 0x7b, 0x57, 0x1a, 0x41, 0x48, 0xae, 0xec, - 0x3d, 0x77, 0x65, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xe2, 0x2e, 0xb4, 0xc3, 0x20, 0x0e, 0x10, 0xe2, - 0x38, 0x0b, 0x4e, 0xdb, 0x5b, 0xa0, 0x38, 0x0b, 0x7b, 0xcf, 0xcd, 0x3d, 0xbb, 0xed, 0xc5, 0x3b, - 0x9d, 0xcd, 0x85, 0x46, 0xd0, 0xba, 0xb2, 0x1d, 0x6c, 0x07, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b, - 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xa4, 0x99, 0x96, 0xd3, 0xd8, 0xf1, 0x7c, - 0x12, 0xee, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x41, 0xd2, 0x0d, - 0xf7, 0xac, 0x15, 0x5d, 0x69, 0x91, 0xd8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x61, 0xc7, - 0x8f, 0xbd, 0x56, 0x77, 0x33, 0x1f, 0xef, 0x57, 0x21, 0x6a, 0xec, 0x90, 0x96, 0xd3, 0x55, 0xef, - 0x85, 0xbc, 0x7a, 0x9d, 0xd8, 0x6b, 0x5e, 0xf1, 0xfc, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x2b, - 0x16, 0x5c, 0x5c, 0xbc, 0x53, 0x5f, 0x69, 0x3a, 0x51, 0xec, 0x35, 0x96, 0x9a, 0x41, 0x63, 0xb7, - 0x1e, 0x07, 0x21, 0xb9, 0x1d, 0x34, 0x3b, 0x2d, 0x52, 0x67, 0x03, 0x81, 0x9e, 0x81, 0xd2, 0x1e, - 0xfb, 0x5f, 0xad, 0xcc, 0x5a, 0x17, 0xad, 0xcb, 0xe5, 0xa5, 0xe9, 0xdf, 0x38, 0x98, 0xff, 0xd0, - 0xfd, 0x83, 0xf9, 0xd2, 0x6d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x23, 0x5b, 0xd1, 0xc6, 0x7e, - 0x9b, 0xcc, 0x16, 0x18, 0xee, 0xa4, 0xc0, 0x1d, 0x59, 0xad, 0xd3, 0x52, 0x2c, 0xa0, 0xe8, 0x0a, - 0x94, 0xdb, 0x4e, 0x18, 0x7b, 0xb1, 0x17, 0xf8, 0xb3, 0xc5, 0x8b, 0xd6, 0xe5, 0xe1, 0xa5, 0x19, - 0x81, 0x5a, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x68, 0x37, 0x42, 0xe2, 0xb8, 0x37, 0xfd, 0xe6, 0xfe, - 0xec, 0xd0, 0x45, 0xeb, 0x72, 0x29, 0xe9, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x54, 0x80, - 0xd2, 0xe2, 0xd6, 0x96, 0xe7, 0x7b, 0xf1, 0x3e, 0xba, 0x0d, 0xe3, 0x7e, 0xe0, 0x12, 0xf9, 0x9f, - 0x7d, 0xc5, 0xd8, 0xf3, 0x17, 0x17, 0xba, 0x97, 0xd2, 0xc2, 0xba, 0x86, 0xb7, 0x34, 0x7d, 0xff, - 0x60, 0x7e, 0x5c, 0x2f, 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xac, 0x1d, 0xb8, 0x8a, 0x6c, 0x81, 0x91, - 0x9d, 0xcf, 0x22, 0x5b, 0x4b, 0xd0, 0x96, 0xa6, 0xee, 0x1f, 0xcc, 0x8f, 0x69, 0x05, 0x58, 0x27, - 0x82, 0x36, 0x61, 0x8a, 0xfe, 0xf5, 0x63, 0x4f, 0xd1, 0x2d, 0x32, 0xba, 0x4f, 0xe4, 0xd1, 0xd5, - 0x50, 0x97, 0x4e, 0xdd, 0x3f, 0x98, 0x9f, 0x4a, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x5d, 0x98, 0x5c, - 0x8c, 0x63, 0xa7, 0xb1, 0x43, 0x5c, 0x3e, 0x83, 0xe8, 0x45, 0x18, 0xf2, 0x9d, 0x16, 0x11, 0xf3, - 0x7b, 0x51, 0x0c, 
0xec, 0xd0, 0xba, 0xd3, 0x22, 0x87, 0x07, 0xf3, 0xd3, 0xb7, 0x7c, 0xef, 0x9d, - 0x8e, 0x58, 0x15, 0xb4, 0x0c, 0x33, 0x6c, 0xf4, 0x3c, 0x80, 0x4b, 0xf6, 0xbc, 0x06, 0xa9, 0x39, - 0xf1, 0x8e, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x45, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x83, 0xf2, 0xe2, - 0x5e, 0xe0, 0xb9, 0xb5, 0xc0, 0x8d, 0xd0, 0x2e, 0x4c, 0xb5, 0x43, 0xb2, 0x45, 0x42, 0x55, 0x34, - 0x6b, 0x5d, 0x2c, 0x5e, 0x1e, 0x7b, 0xfe, 0x72, 0xe6, 0xc7, 0x9a, 0xa8, 0x2b, 0x7e, 0x1c, 0xee, - 0x2f, 0x3d, 0x22, 0xda, 0x9b, 0x4a, 0x41, 0x71, 0x9a, 0xb2, 0xfd, 0x2f, 0x0b, 0x70, 0x66, 0xf1, - 0xdd, 0x4e, 0x48, 0x2a, 0x5e, 0xb4, 0x9b, 0x5e, 0xe1, 0xae, 0x17, 0xed, 0xae, 0x27, 0x23, 0xa0, - 0x96, 0x56, 0x45, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x85, 0x51, 0xfa, 0xfb, 0x16, 0xae, 0x8a, 0x4f, - 0x3e, 0x25, 0x90, 0xc7, 0x2a, 0x4e, 0xec, 0x54, 0x38, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0x63, 0x0d, - 0xb6, 0x21, 0xb7, 0xd7, 0x02, 0x97, 0xb0, 0xc9, 0x2c, 0x2f, 0x3d, 0x4d, 0xd1, 0x97, 0x93, 0xe2, - 0xc3, 0x83, 0xf9, 0x59, 0xde, 0x37, 0x41, 0x42, 0x83, 0x61, 0xbd, 0x3e, 0xb2, 0xd5, 0xfe, 0x1a, - 0x62, 0x94, 0x20, 0x63, 0x6f, 0x5d, 0xd6, 0xb6, 0xca, 0x30, 0xdb, 0x2a, 0xe3, 0xd9, 0xdb, 0x04, - 0x3d, 0x07, 0x43, 0xbb, 0x9e, 0xef, 0xce, 0x8e, 0x30, 0x5a, 0xe7, 0xe9, 0x9c, 0x5f, 0xf7, 0x7c, - 0xf7, 0xf0, 0x60, 0x7e, 0xc6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x13, 0x0b, 0xe6, 0x19, - 0x6c, 0xd5, 0x6b, 0x92, 0x1a, 0x09, 0x23, 0x2f, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0xfa, 0x3c, 0x40, - 0x44, 0x1a, 0x21, 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xae, 0x20, 0x58, 0xc3, 0xa2, 0x07, 0x42, - 0xb4, 0xe3, 0x84, 0x6c, 0x7d, 0x89, 0x81, 0x55, 0x07, 0x42, 0x5d, 0x02, 0x70, 0x82, 0x63, 0x1c, - 0x08, 0xc5, 0x7e, 0x07, 0x02, 0xfa, 0x14, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, 0x07, 0x90, - 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x59, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, - 0x7f, 0xab, 0xfd, 0x4b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0xf3, 0x50, 0xa2, - 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa6, 0xed, 0x2d, 0x75, 0x55, 0x2c, 0xb4, 0x77, - 0xb7, 0x69, 0x41, 0xb4, 0x40, 0xb1, 0xe9, 0x6e, 0xbb, 0xb9, 0xf9, 0x36, 0x69, 0xc4, 0x6b, 0x24, - 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0xae, 0xc3, 0x48, 0xec, 0x84, 0xdb, 0x24, 0x16, - 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, 0xb0, 0xc1, - 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0x81, 0x51, 0x38, 0xb7, 0x5c, 0xaf, 0xe6, 0xac, 0xab, 0x4b, 0x30, - 0xe2, 0x86, 0xde, 0x1e, 0x09, 0xc5, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x02, - 0xe3, 0xfc, 0x42, 0xba, 0xe6, 0xf8, 0x6e, 0x53, 0x0e, 0xf1, 0x69, 0x81, 0x3d, 0x7e, 0x5b, 0x83, - 0x61, 0x03, 0xf3, 0x88, 0x8b, 0xea, 0x52, 0x6a, 0x33, 0xe6, 0x5d, 0x76, 0x5f, 0xb4, 0x60, 0x9a, - 0x37, 0xb3, 0x18, 0xc7, 0xa1, 0xb7, 0xd9, 0x89, 0x49, 0x34, 0x3b, 0xcc, 0x4e, 0xba, 0xe5, 0xac, - 0xd1, 0xca, 0x1d, 0x81, 0x85, 0xdb, 0x29, 0x2a, 0xfc, 0x10, 0x9c, 0x15, 0xed, 0x4e, 0xa7, 0xc1, - 0xb8, 0xab, 0x59, 0xf4, 0x1d, 0x16, 0xcc, 0x35, 0x02, 0x3f, 0x0e, 0x83, 0x66, 0x93, 0x84, 0xb5, - 0xce, 0x66, 0xd3, 0x8b, 0x76, 0xf8, 0x3a, 0xc5, 0x64, 0x8b, 0x9d, 0x04, 0x39, 0x73, 0xa8, 0x90, - 0xc4, 0x1c, 0x5e, 0xb8, 0x7f, 0x30, 0x3f, 0xb7, 0x9c, 0x4b, 0x0a, 0xf7, 0x68, 0x06, 0xed, 0x02, - 0xa2, 0x57, 0x69, 0x3d, 0x76, 0xb6, 0x49, 0xd2, 0xf8, 0xe8, 0xe0, 0x8d, 0x9f, 0xbd, 0x7f, 0x30, - 0x8f, 0xd6, 0xbb, 0x48, 0xe0, 0x0c, 0xb2, 0xe8, 0x1d, 0x38, 0x4d, 0x4b, 0xbb, 0xbe, 0xb5, 0x34, - 0x78, 0x73, 0xb3, 0xf7, 0x0f, 0xe6, 0x4f, 0xaf, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0xb7, 0x5b, - 0x70, 0x2e, 0xf9, 0xfc, 0x95, 0x7b, 0x6d, 
0xc7, 0x77, 0x93, 0x86, 0xcb, 0x83, 0x37, 0x4c, 0xcf, - 0xe4, 0x73, 0xcb, 0x79, 0x94, 0x70, 0x7e, 0x23, 0x73, 0xcb, 0x70, 0x26, 0x73, 0xb5, 0xa0, 0x69, - 0x28, 0xee, 0x12, 0xce, 0x05, 0x95, 0x31, 0xfd, 0x89, 0x4e, 0xc3, 0xf0, 0x9e, 0xd3, 0xec, 0x88, - 0x8d, 0x82, 0xf9, 0x9f, 0x4f, 0x14, 0x5e, 0xb1, 0xec, 0x7f, 0x55, 0x84, 0xa9, 0xe5, 0x7a, 0xf5, - 0x81, 0x76, 0xa1, 0x7e, 0x0d, 0x15, 0x7a, 0x5e, 0x43, 0xc9, 0xa5, 0x56, 0xcc, 0xbd, 0xd4, 0xfe, - 0xff, 0x8c, 0x2d, 0x34, 0xc4, 0xb6, 0xd0, 0x37, 0xe4, 0x6c, 0xa1, 0x63, 0xde, 0x38, 0x7b, 0x39, - 0xab, 0x68, 0x98, 0x4d, 0x66, 0x26, 0xc7, 0x72, 0x23, 0x68, 0x38, 0xcd, 0xf4, 0xd1, 0x77, 0xc4, - 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, 0x48, - 0x84, 0x9e, 0x84, 0xa2, 0xe3, 0xba, 0x8c, 0xdb, 0x2a, 0x2f, 0x9d, 0xb9, 0x7f, 0x30, 0x5f, 0x5c, - 0x74, 0xe9, 0xb5, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x51, 0x18, 0x72, 0xc3, 0xa0, 0x3d, - 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, 0x16, - 0xe0, 0xb1, 0x65, 0xd2, 0xde, 0x59, 0xad, 0xe7, 0x9c, 0xdf, 0x97, 0xa1, 0xd4, 0x0a, 0x7c, 0x2f, - 0x0e, 0xc2, 0x48, 0x34, 0xcd, 0x56, 0xc4, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0x2e, 0xc2, 0x50, 0x3b, - 0x61, 0x2a, 0xc7, 0x25, 0x43, 0xca, 0xd8, 0x49, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x62, 0xc5, - 0x28, 0x8c, 0x5b, 0x11, 0x09, 0x31, 0x83, 0x24, 0x37, 0x33, 0xbd, 0xb3, 0xc5, 0x09, 0x9d, 0xba, - 0x99, 0x29, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0xe5, 0x28, 0x35, 0xb3, 0x03, 0x6d, 0xd3, 0x09, 0x76, - 0x75, 0xab, 0x99, 0x4c, 0x88, 0x18, 0x37, 0xca, 0x48, 0xdf, 0xab, 0xfb, 0xcb, 0x05, 0x40, 0x7c, - 0x08, 0xff, 0x82, 0x0d, 0xdc, 0xad, 0xee, 0x81, 0x1b, 0x7c, 0x4b, 0x1c, 0xd7, 0xe8, 0xfd, 0xa9, - 0x05, 0x8f, 0x2d, 0x7b, 0xbe, 0x4b, 0xc2, 0x9c, 0x05, 0xf8, 0x70, 0xde, 0xb2, 0x47, 0x63, 0x1a, - 0x8c, 0x25, 0x36, 0x74, 0x0c, 0x4b, 0xcc, 0xfe, 0x23, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, 0xc7, - 0xde, 0xea, 0xfe, 0xd8, 0x63, 0x58, 0x16, 0xf6, 0x0d, 0x98, 0x5c, 0x6e, 0x7a, 0xc4, 0x8f, 0xab, - 0xb5, 0xe5, 0xc0, 0xdf, 0xf2, 0xb6, 0xd1, 0x27, 0x60, 0x32, 0xf6, 0x5a, 0x24, 0xe8, 0xc4, 0x75, - 0xd2, 0x08, 0x7c, 0xf6, 0x92, 0xb4, 0x2e, 0x0f, 0x2f, 0xa1, 0xfb, 0x07, 0xf3, 0x93, 0x1b, 0x06, - 0x04, 0xa7, 0x30, 0xed, 0xdf, 0xa1, 0xe3, 0x17, 0xb4, 0xda, 0x81, 0x4f, 0xfc, 0x78, 0x39, 0xf0, - 0x5d, 0x2e, 0x71, 0xf8, 0x04, 0x0c, 0xc5, 0x74, 0x3c, 0xf8, 0xd8, 0x5d, 0x92, 0x1b, 0x85, 0x8e, - 0xc2, 0xe1, 0xc1, 0xfc, 0xd9, 0xee, 0x1a, 0x6c, 0x9c, 0x58, 0x1d, 0xf4, 0x0d, 0x30, 0x12, 0xc5, - 0x4e, 0xdc, 0x89, 0xc4, 0x68, 0x3e, 0x2e, 0x47, 0xb3, 0xce, 0x4a, 0x0f, 0x0f, 0xe6, 0xa7, 0x54, - 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0x8c, 0xb6, 0x48, 0x14, 0x39, 0xdb, 0xf2, 0x36, 0x9c, - 0x12, 0x75, 0x47, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x04, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, - 0x47, 0x27, 0x04, 0xe2, 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x3b, 0x0b, 0xa6, 0x54, 0x5f, - 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xd8, 0xf3, - 0x97, 0x32, 0x2f, 0xea, 0xae, 0x61, 0x4c, 0x28, 0xab, 0xa2, 0x08, 0x6b, 0xd4, 0xec, 0x7f, 0x6a, - 0xc1, 0xa9, 0xd4, 0x17, 0xdd, 0xf0, 0xa2, 0x18, 0xbd, 0xd5, 0xf5, 0x55, 0x0b, 0x83, 0x7d, 0x15, - 0xad, 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0xf6, 0x62, 0xd2, 0x92, - 0x1f, 0xf3, 0x44, 0xcf, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0xa4, 0x4a, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, - 0x6b, 0x45, 0x28, 0xf3, 0x65, 0xbb, 0xe6, 0xb4, 0x4f, 0x60, 0x2e, 0xaa, 0x30, 0xc4, 0xa8, 0xf3, - 0x8e, 0x3f, 0x99, 0xdd, 0x71, 0xd1, 0x9d, 0x05, 0xfa, 0xe4, 0xe7, 
0xcc, 0x91, 0xba, 0x1a, 0x68, - 0x11, 0x66, 0x24, 0x90, 0x03, 0xb0, 0xe9, 0xf9, 0x4e, 0xb8, 0x4f, 0xcb, 0x66, 0x8b, 0x8c, 0xe0, - 0xb3, 0xbd, 0x09, 0x2e, 0x29, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x01, 0x60, 0x8d, 0xe8, 0xdc, 0xcb, - 0x50, 0x56, 0xc8, 0x47, 0xe1, 0x71, 0xe6, 0x3e, 0x05, 0x53, 0xa9, 0xb6, 0xfa, 0x55, 0x1f, 0xd7, - 0x59, 0xa4, 0x5f, 0x66, 0xa7, 0x80, 0xe8, 0xf5, 0x8a, 0xbf, 0x27, 0x4e, 0xd1, 0x77, 0xe1, 0x74, - 0x33, 0xe3, 0x70, 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x98, 0xf8, 0xec, 0xd3, 0x59, 0x50, 0x9c, - 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x36, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8a, 0x32, 0xac, - 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x9d, 0xec, 0xd7, 0x49, 0x93, 0x34, 0xe2, 0x20, 0xfc, - 0x9a, 0x76, 0xff, 0x3c, 0x1f, 0x7d, 0x7e, 0x02, 0x8e, 0x09, 0x02, 0xc5, 0xeb, 0x64, 0x9f, 0x4f, - 0x85, 0xfe, 0x75, 0xc5, 0x9e, 0x5f, 0xf7, 0xb3, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x81, 0xad, 0xbe, - 0x64, 0x6e, 0xf5, 0xf3, 0x3d, 0x17, 0x78, 0xce, 0x26, 0xff, 0x72, 0x01, 0xce, 0x29, 0x1c, 0xca, - 0xee, 0xf3, 0x3f, 0x62, 0x55, 0x5d, 0x81, 0xb2, 0xaf, 0x04, 0x51, 0x96, 0x29, 0x01, 0x4a, 0xc4, - 0x50, 0x09, 0x0e, 0xe5, 0xda, 0xfc, 0x44, 0x5a, 0x34, 0xae, 0x4b, 0x68, 0x85, 0x34, 0x76, 0x09, - 0x8a, 0x1d, 0xcf, 0x15, 0x77, 0xc6, 0xc7, 0xe4, 0x68, 0xdf, 0xaa, 0x56, 0x0e, 0x0f, 0xe6, 0x1f, - 0xcf, 0xd3, 0x0e, 0xd0, 0xcb, 0x2a, 0x5a, 0xb8, 0x55, 0xad, 0x60, 0x5a, 0x19, 0x2d, 0xc2, 0x94, - 0x54, 0x80, 0xdc, 0xa6, 0x1c, 0x54, 0xe0, 0x8b, 0xab, 0x45, 0x89, 0x59, 0xb1, 0x09, 0xc6, 0x69, - 0x7c, 0x54, 0x81, 0xe9, 0xdd, 0xce, 0x26, 0x69, 0x92, 0x98, 0x7f, 0xf0, 0x75, 0xc2, 0x85, 0x90, - 0xe5, 0xe4, 0xb1, 0x75, 0x3d, 0x05, 0xc7, 0x5d, 0x35, 0xec, 0x3f, 0x67, 0x47, 0xbc, 0x18, 0xbd, - 0x5a, 0x18, 0xd0, 0x85, 0x45, 0xa9, 0x7f, 0x2d, 0x97, 0xf3, 0x20, 0xab, 0xe2, 0x3a, 0xd9, 0xdf, - 0x08, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd4, 0x73, 0xcd, 0xff, 0x7c, 0x01, 0xce, - 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x1c, 0x8c, 0xb9, 0x64, 0xcb, 0xe9, 0x34, - 0x63, 0x25, 0x11, 0x1f, 0xe6, 0x5a, 0x91, 0x4a, 0x52, 0x8c, 0x75, 0x9c, 0x23, 0x0c, 0xdb, 0xff, - 0x1a, 0x63, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x80, 0x61, - 0xaf, 0x45, 0x79, 0xad, 0x82, 0xc9, 0x42, 0x55, 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x11, 0x18, 0x6d, - 0x04, 0xad, 0x96, 0xe3, 0xbb, 0xec, 0xca, 0x2b, 0x2f, 0x8d, 0x51, 0x76, 0x6c, 0x99, 0x17, 0x61, - 0x09, 0x43, 0x8f, 0xc1, 0x90, 0x13, 0x6e, 0x73, 0xb1, 0x44, 0x79, 0xa9, 0x44, 0x5b, 0x5a, 0x0c, - 0xb7, 0x23, 0xcc, 0x4a, 0xe9, 0xab, 0xea, 0x6e, 0x10, 0xee, 0x7a, 0xfe, 0x76, 0xc5, 0x0b, 0xc5, - 0x96, 0x50, 0x77, 0xe1, 0x1d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x85, 0xe1, 0x76, 0x10, 0xc6, 0xd1, - 0xec, 0x08, 0x1b, 0xee, 0xc7, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0xb5, 0x20, 0x8c, 0x93, 0x0f, 0xa0, - 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x1b, 0x30, 0x4a, 0xfc, 0xbd, 0xd5, 0x30, 0x68, 0xcd, 0x9e, 0xca, - 0xa7, 0xb4, 0xc2, 0x51, 0xf8, 0x32, 0x4b, 0xd8, 0x4e, 0x51, 0x8c, 0x25, 0x09, 0xf4, 0x0d, 0x50, - 0x24, 0xfe, 0xde, 0xec, 0x28, 0xa3, 0x34, 0x97, 0x43, 0xe9, 0xb6, 0x13, 0x26, 0x67, 0xfe, 0x8a, - 0xbf, 0x87, 0x69, 0x1d, 0xf4, 0x19, 0x28, 0xcb, 0x03, 0x23, 0x12, 0xf2, 0xb7, 0xcc, 0x05, 0x2b, - 0x8f, 0x19, 0x4c, 0xde, 0xe9, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x92, 0x13, 0x52, 0x42, 0x23, - 0x9c, 0x50, 0x43, 0x9f, 0x91, 0x42, 0xdf, 0xb5, 0xa0, 0xe3, 0xc7, 0xd1, 0x6c, 0x99, 0x75, 0x2f, - 0x53, 0x1d, 0x77, 0x3b, 0xc1, 0x4b, 0x4b, 0x85, 0x79, 0x65, 0x6c, 0x90, 0x42, 0x9f, 0x85, 0x09, - 0xfe, 0x9f, 0x2b, 0xb5, 0xa2, 0xd9, 0x33, 0x8c, 0xf6, 0xc5, 0x7c, 0xda, 0x1c, 0x71, 0xe9, 
0x8c, - 0x20, 0x3e, 0xa1, 0x97, 0x46, 0xd8, 0xa4, 0x86, 0x30, 0x4c, 0x34, 0xbd, 0x3d, 0xe2, 0x93, 0x28, - 0xaa, 0x85, 0xc1, 0x26, 0x99, 0x05, 0x36, 0x30, 0xe7, 0xb2, 0x95, 0x60, 0xc1, 0x26, 0x59, 0x9a, - 0xa1, 0x34, 0x6f, 0xe8, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x82, 0x49, 0xfa, 0x08, 0xf3, 0x12, 0xa2, - 0x63, 0xfd, 0x88, 0xb2, 0xa7, 0x12, 0x36, 0x2a, 0xe1, 0x14, 0x11, 0x74, 0x13, 0xc6, 0xa3, 0xd8, - 0x09, 0xe3, 0x4e, 0x9b, 0x13, 0x3d, 0xdb, 0x8f, 0x28, 0xd3, 0xa1, 0xd6, 0xb5, 0x2a, 0xd8, 0x20, - 0x80, 0x5e, 0x87, 0x72, 0xd3, 0xdb, 0x22, 0x8d, 0xfd, 0x46, 0x93, 0xcc, 0x8e, 0x33, 0x6a, 0x99, - 0x87, 0xca, 0x0d, 0x89, 0xc4, 0x5f, 0x85, 0xea, 0x2f, 0x4e, 0xaa, 0xa3, 0xdb, 0x70, 0x36, 0x26, - 0x61, 0xcb, 0xf3, 0x1d, 0x7a, 0x18, 0x88, 0xd7, 0x12, 0xd3, 0x4d, 0x4e, 0xb0, 0xdd, 0x76, 0x41, - 0xcc, 0xc6, 0xd9, 0x8d, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x83, 0xd9, 0x0c, 0x48, 0xd0, 0xf4, - 0x1a, 0xfb, 0xb3, 0xa7, 0x19, 0xe5, 0x4f, 0x0a, 0xca, 0xb3, 0x1b, 0x39, 0x78, 0x87, 0x3d, 0x60, - 0x38, 0x97, 0x3a, 0xba, 0x09, 0x53, 0xec, 0x04, 0xaa, 0x75, 0x9a, 0x4d, 0xd1, 0xe0, 0x24, 0x6b, - 0xf0, 0x23, 0xf2, 0x3e, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, 0xe1, 0x74, 0x6d, 0xb4, - 0xc9, 0xd4, 0x60, 0x9d, 0xd0, 0x8b, 0xf7, 0xe9, 0xb9, 0x41, 0xee, 0xc5, 0xb3, 0x53, 0x3d, 0x45, - 0x10, 0x3a, 0xaa, 0xd2, 0x95, 0xe9, 0x85, 0x38, 0x4d, 0x90, 0x1e, 0xa9, 0x51, 0xec, 0x7a, 0xfe, - 0xec, 0x34, 0x3b, 0xa9, 0xd5, 0x89, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0x0a, 0x8c, 0xfe, 0xb8, - 0x49, 0x6f, 0xae, 0x19, 0x86, 0x98, 0xa8, 0xc0, 0x24, 0x00, 0x27, 0x38, 0x94, 0x99, 0x8c, 0xe3, - 0xfd, 0x59, 0xc4, 0x50, 0xd5, 0xc1, 0xb2, 0xb1, 0xf1, 0x19, 0x4c, 0xcb, 0xed, 0x4d, 0x98, 0x54, - 0x07, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x8c, 0x7d, 0x12, 0x02, 0xb3, 0x32, 0xed, 0x02, 0x63, - 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x25, 0x4b, 0xfb, 0x31, 0xe1, 0xcf, 0xf4, 0xa2, 0xd6, - 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0x39, 0x1b, 0x9a, 0x9c, 0xb6, 0x03, 0xdc, 0x2f, 0xcf, - 0x40, 0x69, 0x27, 0x88, 0x62, 0x8a, 0xcd, 0xda, 0x18, 0x4e, 0x18, 0xcf, 0x6b, 0xa2, 0x1c, 0x2b, - 0x0c, 0xf4, 0x2a, 0x4c, 0x34, 0xf4, 0x06, 0xc4, 0xe5, 0xa8, 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, - 0xa2, 0x57, 0xa0, 0xc4, 0xcc, 0x3a, 0x1a, 0x41, 0x53, 0x70, 0x6d, 0xf2, 0x86, 0x2f, 0xd5, 0x44, - 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x74, 0x09, 0x46, 0x68, 0x17, 0xaa, 0x35, 0x71, 0x2d, 0x29, - 0xd9, 0xcf, 0x35, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0xab, 0x05, 0x6d, 0x94, 0xe9, 0x13, 0x97, 0xa0, - 0x1a, 0x8c, 0xde, 0x75, 0xbc, 0xd8, 0xf3, 0xb7, 0x05, 0xff, 0xf1, 0x54, 0xcf, 0x3b, 0x8a, 0x55, - 0xba, 0xc3, 0x2b, 0xf0, 0x5b, 0x54, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0x76, 0x7c, 0x9f, 0x52, - 0x2c, 0x0c, 0x4a, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, 0x16, 0x80, 0xdc, - 0x61, 0xc4, 0x15, 0xe6, 0x14, 0xcf, 0xf4, 0x27, 0xba, 0xa1, 0xea, 0x2c, 0x4d, 0xd2, 0x3b, 0x3a, - 0xf9, 0x8f, 0x35, 0x7a, 0x76, 0xcc, 0xf8, 0xb4, 0xee, 0xce, 0xa0, 0x6f, 0xa6, 0x4b, 0xdc, 0x09, - 0x63, 0xe2, 0x2e, 0xc6, 0x62, 0x70, 0x3e, 0x3a, 0xd8, 0x23, 0x65, 0xc3, 0x6b, 0x11, 0x7d, 0x3b, - 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x62, 0x11, 0x66, 0xf3, 0xba, 0x4b, 0x17, 0x1d, 0xb9, 0xe7, - 0xc5, 0xcb, 0x94, 0xbd, 0xb2, 0xcc, 0x45, 0xb7, 0x22, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, 0x79, - 0xdb, 0xf2, 0x8d, 0x39, 0x9c, 0xcc, 0x7e, 0x9d, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x21, 0x71, 0x22, - 0x61, 0xaf, 0xa3, 0xad, 0x12, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x02, 0xac, 0xa1, 0x3e, 0x02, 0x2c, - 0x63, 0x88, 0x86, 0x8f, 0x77, 0x88, 0xd0, 0xe7, 0x00, 0xb6, 0x3c, 0xdf, 0x8b, 0x76, 0x18, 0xf5, - 0x91, 0x23, 0x53, 
0x57, 0xcc, 0xd9, 0xaa, 0xa2, 0x82, 0x35, 0x8a, 0xe8, 0x25, 0x18, 0x53, 0x1b, - 0xb0, 0x5a, 0x61, 0xca, 0x4b, 0xcd, 0x18, 0x24, 0x39, 0x8d, 0x2a, 0x58, 0xc7, 0xb3, 0xdf, 0x4e, - 0xaf, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0x06, 0x1d, 0xdf, 0x42, 0xef, 0xf1, 0xb5, 0xbf, 0x5a, - 0x84, 0x29, 0xa3, 0xb1, 0x4e, 0x34, 0xc0, 0x99, 0x75, 0x95, 0x1e, 0xe0, 0x4e, 0x4c, 0xc4, 0xfe, - 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x83, 0x72, 0xd3, 0x89, - 0x98, 0x30, 0x8c, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0xe4, 0x61, 0xe2, 0x44, 0xb1, 0x76, 0x6b, 0x72, - 0xda, 0x09, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x1a, 0x84, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, - 0x73, 0x18, 0x7a, 0x05, 0xc6, 0x43, 0xc2, 0x56, 0xc5, 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x4e, - 0xd8, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xbc, 0x0d, 0x46, 0x7a, 0xbc, 0x0d, 0x9e, 0x82, 0x51, - 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0x4a, 0x83, 0x2d, - 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x29, 0x8e, 0x4b, 0xfc, 0x94, 0x13, 0x4b, 0x1e, 0x4b, 0x98, - 0xfd, 0x51, 0x98, 0xac, 0x38, 0xa4, 0x15, 0xf8, 0x2b, 0xbe, 0xdb, 0x0e, 0x3c, 0x3f, 0x46, 0xb3, - 0x30, 0xc4, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd1, 0x86, 0xf0, 0x10, 0x7d, 0x10, 0xd8, 0xdb, 0x70, - 0xa6, 0x12, 0xdc, 0xf5, 0xef, 0x3a, 0xa1, 0xbb, 0x58, 0xab, 0x6a, 0xef, 0xeb, 0x75, 0xf9, 0xbe, - 0xe3, 0x76, 0x58, 0x99, 0x47, 0xaf, 0x56, 0x93, 0xb3, 0xb5, 0xab, 0x5e, 0x93, 0xe4, 0x48, 0x41, - 0xfe, 0x46, 0xc1, 0x68, 0x29, 0xc1, 0x57, 0x8a, 0x2a, 0x2b, 0x57, 0x51, 0xf5, 0x06, 0x94, 0xb6, - 0x3c, 0xd2, 0x74, 0x31, 0xd9, 0x12, 0x2b, 0xf1, 0xc9, 0x7c, 0xd3, 0x92, 0x55, 0x8a, 0x29, 0xa5, - 0x5e, 0xfc, 0x75, 0xb8, 0x2a, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc2, 0xb4, 0x7c, 0x30, 0x48, 0xa8, - 0x58, 0x97, 0x4f, 0xf5, 0x7a, 0x85, 0x98, 0xc4, 0x4f, 0xdf, 0x3f, 0x98, 0x9f, 0xc6, 0x29, 0x32, - 0xb8, 0x8b, 0x30, 0x7d, 0x0e, 0xb6, 0xe8, 0x09, 0x3c, 0xc4, 0x86, 0x9f, 0x3d, 0x07, 0xd9, 0xcb, - 0x96, 0x95, 0xda, 0x3f, 0x62, 0xc1, 0x23, 0x5d, 0x23, 0x23, 0x5e, 0xf8, 0xc7, 0x3c, 0x0b, 0xe9, - 0x17, 0x77, 0xa1, 0xff, 0x8b, 0xdb, 0xfe, 0x19, 0x0b, 0x4e, 0xaf, 0xb4, 0xda, 0xf1, 0x7e, 0xc5, - 0x33, 0xb5, 0x4a, 0x2f, 0xc3, 0x48, 0x8b, 0xb8, 0x5e, 0xa7, 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, - 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xa2, 0x1e, 0x07, 0xa1, 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, - 0xec, 0xac, 0xf7, 0xde, 0x25, 0x37, 0xbc, 0x96, 0x27, 0x4d, 0x85, 0x7a, 0xca, 0xec, 0x16, 0xe4, - 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xfc, 0xd8, 0x8b, 0xf7, 0x85, 0x42, 0x48, 0x12, 0xc1, 0x09, 0x3d, - 0xfb, 0x2b, 0x16, 0x4c, 0xc9, 0x75, 0xbf, 0xe8, 0xba, 0x21, 0x89, 0x22, 0x34, 0x07, 0x05, 0xaf, - 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x6a, 0x0d, 0x17, 0xbc, 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, - 0x34, 0x75, 0x63, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x94, 0xfc, 0xc0, 0xe5, 0xe6, 0x5a, - 0xfc, 0x4a, 0x63, 0x0b, 0x6c, 0x5d, 0x94, 0x61, 0x05, 0x45, 0x35, 0x28, 0x73, 0x4b, 0xa6, 0x64, - 0xd1, 0x0e, 0x64, 0x0f, 0xc5, 0xbe, 0x6c, 0x43, 0xd6, 0xc4, 0x09, 0x11, 0xfb, 0xfb, 0x2d, 0x18, - 0x97, 0x5f, 0x36, 0x20, 0xcf, 0x49, 0xb7, 0x56, 0xc2, 0x6f, 0x26, 0x5b, 0x8b, 0xf2, 0x8c, 0x0c, - 0x62, 0xb0, 0x8a, 0xc5, 0xa3, 0xb0, 0x8a, 0xf6, 0x0f, 0x17, 0x60, 0x52, 0x76, 0xa7, 0xde, 0xd9, - 0x8c, 0x48, 0x8c, 0x36, 0xa0, 0xec, 0xf0, 0x21, 0x27, 0x72, 0xc5, 0x3e, 0x91, 0x2d, 0x14, 0x30, - 0xe6, 0x27, 0xb9, 0xbd, 0x17, 0x65, 0x6d, 0x9c, 0x10, 0x42, 0x4d, 0x98, 0xf1, 0x83, 0x98, 0x9d, - 0xe4, 0x0a, 0xde, 0x4b, 0xf5, 0x92, 0xa6, 0x7e, 0x4e, 0x50, 0x9f, 0x59, 0x4f, 0x53, 0xc1, 0xdd, - 0x84, 0xd1, 0x8a, 0x14, 0xb4, 0x14, 0xf3, 
0x5f, 0xf6, 0xfa, 0x2c, 0x64, 0xcb, 0x59, 0xec, 0x5f, - 0xb1, 0xa0, 0x2c, 0xd1, 0x4e, 0x42, 0xcb, 0xb6, 0x06, 0xa3, 0x11, 0x9b, 0x04, 0x39, 0x34, 0x76, - 0xaf, 0x8e, 0xf3, 0xf9, 0x4a, 0x2e, 0x28, 0xfe, 0x3f, 0xc2, 0x92, 0x06, 0x93, 0xb3, 0xab, 0xee, - 0xbf, 0x4f, 0xe4, 0xec, 0xaa, 0x3f, 0x39, 0x37, 0xcc, 0x1f, 0xb0, 0x3e, 0x6b, 0x82, 0x2b, 0xca, - 0x47, 0xb5, 0x43, 0xb2, 0xe5, 0xdd, 0x4b, 0xf3, 0x51, 0x35, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x05, - 0xe3, 0x0d, 0x29, 0x60, 0x4d, 0xb6, 0xeb, 0xa5, 0x9e, 0xc2, 0x7e, 0xa5, 0x17, 0xe2, 0x82, 0x8d, - 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x6a, 0xfe, 0x62, 0x3f, 0x35, 0x7f, 0x42, 0x37, 0x5f, 0xe9, - 0xfd, 0xa3, 0x16, 0x8c, 0x70, 0xc1, 0xda, 0x60, 0x72, 0x4d, 0x4d, 0x4d, 0x96, 0x8c, 0xdd, 0x6d, - 0x5a, 0x28, 0xd4, 0x5e, 0x68, 0x0d, 0xca, 0xec, 0x07, 0x13, 0x0c, 0x16, 0xf3, 0xad, 0xe2, 0x79, - 0xab, 0x7a, 0x07, 0x6f, 0xcb, 0x6a, 0x38, 0xa1, 0x60, 0xff, 0x60, 0x91, 0x1e, 0x55, 0x09, 0xaa, - 0x71, 0x83, 0x5b, 0x0f, 0xef, 0x06, 0x2f, 0x3c, 0xac, 0x1b, 0x7c, 0x1b, 0xa6, 0x1a, 0x9a, 0x52, - 0x2d, 0x99, 0xc9, 0xcb, 0x3d, 0x17, 0x89, 0xa6, 0x7f, 0xe3, 0x22, 0x93, 0x65, 0x93, 0x08, 0x4e, - 0x53, 0x45, 0xdf, 0x0c, 0xe3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x52, 0xe2, 0x23, 0xf9, 0xeb, 0x45, - 0x6f, 0x82, 0x8b, 0xd8, 0xb4, 0xea, 0xd8, 0x20, 0x66, 0xff, 0xb1, 0x05, 0x68, 0xa5, 0xbd, 0x43, - 0x5a, 0x24, 0x74, 0x9a, 0x89, 0x6c, 0xfc, 0x2f, 0x5b, 0x30, 0x4b, 0xba, 0x8a, 0x97, 0x83, 0x56, - 0x4b, 0xbc, 0x40, 0x72, 0x1e, 0xc9, 0x2b, 0x39, 0x75, 0x94, 0xdb, 0xc0, 0x6c, 0x1e, 0x06, 0xce, - 0x6d, 0x0f, 0xad, 0xc1, 0x29, 0x7e, 0xe5, 0x29, 0x80, 0x66, 0x1b, 0xfd, 0xa8, 0x20, 0x7c, 0x6a, - 0xa3, 0x1b, 0x05, 0x67, 0xd5, 0xb3, 0xbf, 0x73, 0x1c, 0x72, 0x7b, 0xf1, 0x81, 0x52, 0xe0, 0x03, - 0xa5, 0xc0, 0x07, 0x4a, 0x81, 0x0f, 0x94, 0x02, 0x1f, 0x28, 0x05, 0xbe, 0xee, 0x95, 0x02, 0x7f, - 0x68, 0xc1, 0xa9, 0xee, 0x6b, 0xe0, 0x24, 0x18, 0xf3, 0x0e, 0x9c, 0xea, 0xbe, 0xeb, 0x7a, 0xda, - 0xc1, 0x75, 0xf7, 0x33, 0xb9, 0xf7, 0x32, 0xbe, 0x01, 0x67, 0xd1, 0xb7, 0x7f, 0xb1, 0x04, 0xc3, - 0x2b, 0x7b, 0xc4, 0x8f, 0x4f, 0xe0, 0x13, 0x1b, 0x30, 0xe9, 0xf9, 0x7b, 0x41, 0x73, 0x8f, 0xb8, - 0x1c, 0x7e, 0x94, 0xf7, 0xee, 0x59, 0x41, 0x7a, 0xb2, 0x6a, 0x90, 0xc0, 0x29, 0x92, 0x0f, 0x43, - 0xe6, 0x7c, 0x15, 0x46, 0xf8, 0xed, 0x20, 0x04, 0xce, 0x99, 0x97, 0x01, 0x1b, 0x44, 0x71, 0xe7, - 0x25, 0xf2, 0x70, 0x7e, 0xfb, 0x88, 0xea, 0xe8, 0x6d, 0x98, 0xdc, 0xf2, 0xc2, 0x28, 0xde, 0xf0, - 0x5a, 0x24, 0x8a, 0x9d, 0x56, 0xfb, 0x01, 0x64, 0xcc, 0x6a, 0x1c, 0x56, 0x0d, 0x4a, 0x38, 0x45, - 0x19, 0x6d, 0xc3, 0x44, 0xd3, 0xd1, 0x9b, 0x1a, 0x3d, 0x72, 0x53, 0xea, 0xda, 0xb9, 0xa1, 0x13, - 0xc2, 0x26, 0x5d, 0xba, 0x4f, 0x1b, 0x4c, 0x4c, 0x5a, 0x62, 0xc2, 0x03, 0xb5, 0x4f, 0xb9, 0x7c, - 0x94, 0xc3, 0x28, 0x07, 0xc5, 0x2c, 0x63, 0xcb, 0x26, 0x07, 0xa5, 0xd9, 0xbf, 0x7e, 0x1e, 0xca, - 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x6e, 0xae, 0x2b, 0x83, 0xf5, 0x75, 0xcd, 0x6b, 0x84, 0x81, 0x29, - 0xdd, 0x5f, 0x91, 0x94, 0x70, 0x42, 0x14, 0x2d, 0xc3, 0x48, 0x44, 0x42, 0x8f, 0x44, 0xe2, 0x0e, - 0xeb, 0x31, 0x8d, 0x0c, 0x8d, 0x3b, 0x95, 0xf0, 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x82, - 0x4f, 0x76, 0xcb, 0x68, 0xcb, 0x6b, 0x91, 0x95, 0x62, 0x01, 0x45, 0xaf, 0xc3, 0x68, 0x48, 0x9a, - 0x4c, 0x7d, 0x34, 0x31, 0xf8, 0x22, 0xe7, 0xda, 0x28, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, - 0x90, 0x50, 0x0e, 0xcc, 0xf3, 0xb7, 0x95, 0xbd, 0xa8, 0x38, 0xc1, 0xd5, 0x8e, 0xc7, 0x09, 0x86, - 0xf4, 0xef, 0xc1, 0x19, 0xd5, 0xd0, 0x55, 0x98, 0x51, 0xa5, 0x55, 0x3f, 0x8a, 0x1d, 0x7a, 0x72, - 0x4e, 0x31, 0x5a, 0x4a, 0x00, 0x82, 0xd3, 0x08, 0xb8, 0xbb, 0x8e, 
0xfd, 0x53, 0x16, 0xf0, 0x71, - 0x3e, 0x81, 0x67, 0xff, 0x6b, 0xe6, 0xb3, 0xff, 0x5c, 0xee, 0xcc, 0xe5, 0x3c, 0xf9, 0xef, 0x5b, - 0x30, 0xa6, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x1e, 0x6b, 0xb6, 0x03, 0xd3, 0x74, 0xa5, 0xdf, 0xdc, - 0x8c, 0x48, 0xb8, 0x47, 0x5c, 0xb6, 0x30, 0x0b, 0x0f, 0xb6, 0x30, 0x95, 0x21, 0xdb, 0x8d, 0x14, - 0x41, 0xdc, 0xd5, 0x04, 0x7a, 0x59, 0xea, 0x52, 0x8a, 0x86, 0x1d, 0x38, 0xd7, 0x93, 0x1c, 0x1e, - 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0xeb, 0x4e, 0xec, 0xcf, 0xcb, 0x6f, 0x54, 0x06, 0x83, 0x0d, 0xb5, - 0x58, 0x52, 0x06, 0x83, 0x6a, 0x39, 0xe0, 0x04, 0x87, 0xee, 0xd1, 0x9d, 0x20, 0x8a, 0xd3, 0x06, - 0x83, 0xd7, 0x82, 0x28, 0xc6, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0x72, 0x8f, 0x34, 0xf8, 0x52, 0xd7, - 0x9f, 0x33, 0x56, 0xfe, 0x73, 0xc6, 0xfe, 0x0f, 0x16, 0x4c, 0xae, 0x2e, 0x1b, 0x12, 0xe1, 0x05, - 0x00, 0xfe, 0x06, 0xbb, 0x73, 0x67, 0x5d, 0x6a, 0xdb, 0xb9, 0xc2, 0x54, 0x95, 0x62, 0x0d, 0x03, - 0x9d, 0x83, 0x62, 0xb3, 0xe3, 0x0b, 0xe9, 0xe4, 0x28, 0xbd, 0xb0, 0x6f, 0x74, 0x7c, 0x4c, 0xcb, - 0x34, 0x27, 0x84, 0xe2, 0xc0, 0x4e, 0x08, 0x7d, 0x83, 0x01, 0xa0, 0x79, 0x18, 0xbe, 0x7b, 0xd7, - 0x73, 0xb9, 0xcb, 0xa5, 0xb0, 0x04, 0xb8, 0x73, 0xa7, 0x5a, 0x89, 0x30, 0x2f, 0xb7, 0xbf, 0x54, - 0x84, 0xb9, 0xd5, 0x26, 0xb9, 0xf7, 0x1e, 0xdd, 0x4e, 0x07, 0x75, 0xa1, 0x38, 0x9a, 0x68, 0xe8, - 0xa8, 0x6e, 0x32, 0xfd, 0xc7, 0x63, 0x0b, 0x46, 0xb9, 0xbd, 0x9c, 0x74, 0x42, 0x7d, 0x35, 0xab, - 0xf5, 0xfc, 0x01, 0x59, 0xe0, 0x76, 0x77, 0xc2, 0x87, 0x4e, 0xdd, 0xb4, 0xa2, 0x14, 0x4b, 0xe2, - 0x73, 0x9f, 0x80, 0x71, 0x1d, 0xf3, 0x48, 0x0e, 0x6b, 0x7f, 0xa9, 0x08, 0xd3, 0xb4, 0x07, 0x0f, - 0x75, 0x22, 0x6e, 0x75, 0x4f, 0xc4, 0x71, 0x3b, 0x2d, 0xf5, 0x9f, 0x8d, 0xb7, 0xd2, 0xb3, 0xf1, - 0x5c, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0x87, 0x05, 0xa7, 0x56, 0x9b, 0x41, 0x63, 0x37, 0xe5, - 0x58, 0xf4, 0x12, 0x8c, 0xd1, 0x73, 0x3c, 0x32, 0x7c, 0xde, 0x8d, 0x28, 0x08, 0x02, 0x84, 0x75, - 0x3c, 0xad, 0xda, 0xad, 0x5b, 0xd5, 0x4a, 0x56, 0xf0, 0x04, 0x01, 0xc2, 0x3a, 0x9e, 0xfd, 0x9b, - 0x16, 0x9c, 0xbf, 0xba, 0xbc, 0x92, 0x2c, 0xc5, 0xae, 0xf8, 0x0d, 0x97, 0x60, 0xa4, 0xed, 0x6a, - 0x5d, 0x49, 0x04, 0xbe, 0x15, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0xc4, 0x26, 0xf9, 0x49, 0x0b, 0x4e, - 0x5d, 0xf5, 0x62, 0x7a, 0x2d, 0xa7, 0x23, 0x09, 0xd0, 0x7b, 0x39, 0xf2, 0xe2, 0x20, 0xdc, 0x4f, - 0x47, 0x12, 0xc0, 0x0a, 0x82, 0x35, 0x2c, 0xde, 0xf2, 0x9e, 0xc7, 0x2c, 0xb5, 0x0b, 0xa6, 0x1e, - 0x0b, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0xb9, 0x5e, 0xc8, 0xa4, 0x86, 0xfb, 0xe2, 0x84, 0x55, - 0x1f, 0x56, 0x91, 0x00, 0x9c, 0xe0, 0xd0, 0x07, 0xd4, 0xfc, 0xd5, 0x66, 0x27, 0x8a, 0x49, 0xb8, - 0x15, 0xe5, 0x9c, 0x8e, 0x2f, 0x40, 0x99, 0x48, 0x19, 0xbd, 0xe8, 0xb5, 0x62, 0x35, 0x95, 0xf0, - 0x9e, 0x07, 0x34, 0x50, 0x78, 0x03, 0xb8, 0x29, 0x1e, 0xcd, 0xcf, 0x6c, 0x15, 0x10, 0xd1, 0xdb, - 0xd2, 0x23, 0x3c, 0x30, 0x57, 0xf1, 0x95, 0x2e, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x88, 0x05, 0x67, - 0xd4, 0x07, 0xbf, 0xef, 0x3e, 0xd3, 0xfe, 0xb9, 0x02, 0x4c, 0x5c, 0xdb, 0xd8, 0xa8, 0x5d, 0x25, - 0xb1, 0xb8, 0xb6, 0xfb, 0xab, 0xd1, 0xb1, 0xa6, 0x0d, 0xec, 0xf5, 0x0a, 0xec, 0xc4, 0x5e, 0x73, - 0x81, 0x07, 0x0a, 0x5a, 0xa8, 0xfa, 0xf1, 0xcd, 0xb0, 0x1e, 0x87, 0x9e, 0xbf, 0x9d, 0xa9, 0x3f, - 0x94, 0xcc, 0x45, 0x31, 0x8f, 0xb9, 0x40, 0x2f, 0xc0, 0x08, 0x8b, 0x54, 0x24, 0x27, 0xe1, 0x51, - 0xf5, 0x88, 0x62, 0xa5, 0x87, 0x07, 0xf3, 0xe5, 0x5b, 0xb8, 0xca, 0xff, 0x60, 0x81, 0x8a, 0x6e, - 0xc1, 0xd8, 0x4e, 0x1c, 0xb7, 0xaf, 0x11, 0xc7, 0xa5, 0xaf, 0x65, 0x7e, 0x1c, 0x5e, 0xc8, 0x3a, - 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xe4, 0x04, 0x49, 0xca, 0x22, 0xac, 0xd3, 0xb1, 0xeb, 0x00, 
0x09, - 0xec, 0x98, 0x74, 0x27, 0xf6, 0xef, 0x5b, 0x30, 0xca, 0x83, 0x46, 0x84, 0xe8, 0x93, 0x30, 0x44, - 0xee, 0x91, 0x86, 0x60, 0x95, 0x33, 0x3b, 0x9c, 0x70, 0x5a, 0x5c, 0x06, 0x4c, 0xff, 0x63, 0x56, - 0x0b, 0x5d, 0x83, 0x51, 0xda, 0xdb, 0xab, 0x2a, 0x82, 0xc6, 0xe3, 0x79, 0x5f, 0xac, 0xa6, 0x9d, - 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0xfb, 0xdc, 0x68, 0xd7, 0xe9, 0x89, 0x1d, 0xf7, 0x62, - 0x2c, 0x36, 0x96, 0x6b, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x67, 0x59, 0x88, 0x13, 0x22, 0xf6, 0x06, - 0x94, 0xe9, 0xa4, 0x2e, 0x36, 0x3d, 0xa7, 0xb7, 0x42, 0xfd, 0x69, 0x28, 0x4b, 0x75, 0x79, 0x24, - 0x9c, 0xc5, 0x19, 0x55, 0xa9, 0x4d, 0x8f, 0x70, 0x02, 0xb7, 0xb7, 0xe0, 0x34, 0x33, 0x7e, 0x74, - 0xe2, 0x1d, 0x63, 0x8f, 0xf5, 0x5f, 0xcc, 0xcf, 0x88, 0x97, 0x27, 0x9f, 0x99, 0x59, 0xcd, 0x1f, - 0x73, 0x5c, 0x52, 0x4c, 0x5e, 0xa1, 0xf6, 0x57, 0x87, 0xe0, 0xd1, 0x6a, 0x3d, 0x3f, 0x9e, 0xc8, - 0x2b, 0x30, 0xce, 0xf9, 0x52, 0xba, 0xb4, 0x9d, 0xa6, 0x68, 0x57, 0x09, 0x7f, 0x37, 0x34, 0x18, - 0x36, 0x30, 0xd1, 0x79, 0x28, 0x7a, 0xef, 0xf8, 0x69, 0xd7, 0xa6, 0xea, 0x1b, 0xeb, 0x98, 0x96, - 0x53, 0x30, 0x65, 0x71, 0xf9, 0xdd, 0xa1, 0xc0, 0x8a, 0xcd, 0x7d, 0x0d, 0x26, 0xbd, 0xa8, 0x11, - 0x79, 0x55, 0x9f, 0x9e, 0x33, 0xda, 0x49, 0xa5, 0xa4, 0x22, 0xb4, 0xd3, 0x0a, 0x8a, 0x53, 0xd8, - 0xda, 0x45, 0x36, 0x3c, 0x30, 0x9b, 0xdc, 0xd7, 0x7b, 0x9a, 0xbe, 0x00, 0xda, 0xec, 0xeb, 0x22, - 0x26, 0xc5, 0x17, 0x2f, 0x00, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0xfa, 0xe4, 0x6c, 0xec, 0x38, 0xed, - 0xc5, 0x4e, 0xbc, 0x53, 0xf1, 0xa2, 0x46, 0xb0, 0x47, 0xc2, 0x7d, 0x26, 0x2d, 0x28, 0x25, 0x4f, - 0x4e, 0x05, 0x58, 0xbe, 0xb6, 0x58, 0xa3, 0x98, 0xb8, 0xbb, 0x0e, 0x5a, 0x84, 0x29, 0x59, 0x58, - 0x27, 0x11, 0xbb, 0xc2, 0xc6, 0x18, 0x19, 0xe5, 0x6c, 0x24, 0x8a, 0x15, 0x91, 0x34, 0xbe, 0xc9, - 0x49, 0xc3, 0x71, 0x70, 0xd2, 0x2f, 0xc3, 0x84, 0xe7, 0x7b, 0xb1, 0xe7, 0xc4, 0x01, 0x57, 0x41, - 0x71, 0xc1, 0x00, 0x93, 0xad, 0x57, 0x75, 0x00, 0x36, 0xf1, 0xec, 0xff, 0x36, 0x04, 0x33, 0x6c, - 0xda, 0x3e, 0x58, 0x61, 0x5f, 0x4f, 0x2b, 0xec, 0x56, 0xf7, 0x0a, 0x3b, 0x8e, 0x27, 0xc2, 0x03, - 0x2f, 0xb3, 0xb7, 0xa1, 0xac, 0xfc, 0xab, 0xa4, 0x83, 0xa5, 0x95, 0xe3, 0x60, 0xd9, 0x9f, 0xfb, - 0x90, 0x26, 0x6a, 0xc5, 0x4c, 0x13, 0xb5, 0xbf, 0x65, 0x41, 0xa2, 0x53, 0x41, 0xd7, 0xa0, 0xdc, - 0x0e, 0x98, 0xe5, 0x65, 0x28, 0xcd, 0x99, 0x1f, 0xcd, 0xbc, 0xa8, 0xf8, 0xa5, 0xc8, 0x3f, 0xbe, - 0x26, 0x6b, 0xe0, 0xa4, 0x32, 0x5a, 0x82, 0xd1, 0x76, 0x48, 0xea, 0x31, 0x0b, 0x2b, 0xd2, 0x97, - 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0xcf, 0x5b, 0x00, 0xdc, 0x0a, 0xcc, 0xf1, 0xb7, - 0xc9, 0x09, 0x88, 0xbb, 0x2b, 0x30, 0x14, 0xb5, 0x49, 0xa3, 0x97, 0x4d, 0x6c, 0xd2, 0x9f, 0x7a, - 0x9b, 0x34, 0x92, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x17, 0xc0, 0x64, 0x82, 0x56, 0x8d, - 0x49, 0x0b, 0x3d, 0x6b, 0x84, 0x19, 0x38, 0x97, 0x0a, 0x33, 0x50, 0x66, 0xd8, 0x9a, 0x64, 0xf5, - 0x6d, 0x28, 0xb6, 0x9c, 0x7b, 0x42, 0x74, 0xf6, 0x74, 0xef, 0x6e, 0x50, 0xfa, 0x0b, 0x6b, 0xce, - 0x3d, 0xfe, 0x48, 0x7c, 0x5a, 0x2e, 0x90, 0x35, 0xe7, 0xde, 0x21, 0xb7, 0x7c, 0x65, 0x87, 0xd4, - 0x0d, 0x2f, 0x8a, 0xbf, 0xf0, 0x5f, 0x93, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, 0x5b, 0x9e, 0x2f, - 0x6c, 0xa2, 0x06, 0x6a, 0xcb, 0xf3, 0xd3, 0x6d, 0x79, 0xfe, 0x00, 0x6d, 0x79, 0x3e, 0x7a, 0x17, - 0x46, 0x85, 0xfd, 0xa1, 0x08, 0xeb, 0x73, 0x65, 0x80, 0xf6, 0x84, 0xf9, 0x22, 0x6f, 0xf3, 0x8a, - 0x7c, 0x04, 0x8b, 0xd2, 0xbe, 0xed, 0xca, 0x06, 0xd1, 0x5f, 0xb7, 0x60, 0x52, 0xfc, 0xc6, 0xe4, - 0x9d, 0x0e, 0x89, 0x62, 0xc1, 0x7b, 0x7e, 0x7c, 0xf0, 0x3e, 0x88, 0x8a, 0xbc, 0x2b, 0x1f, 0x97, - 0xc7, 0xac, 0x09, 
0xec, 0xdb, 0xa3, 0x54, 0x2f, 0xd0, 0x3f, 0xb0, 0xe0, 0x74, 0xcb, 0xb9, 0xc7, - 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xbd, 0x40, 0xa8, 0xfe, 0x3f, 0x39, 0xd8, 0xf4, 0x77, 0x55, 0xe7, - 0x9d, 0x94, 0xfa, 0xc9, 0xd3, 0x59, 0x28, 0x7d, 0xbb, 0x9a, 0xd9, 0xaf, 0xb9, 0x2d, 0x28, 0xc9, - 0xf5, 0x96, 0x21, 0x6a, 0xa8, 0xe8, 0x8c, 0xf5, 0x91, 0xcd, 0x3f, 0x75, 0x5f, 0x7f, 0xda, 0x8e, - 0x58, 0x6b, 0x0f, 0xb5, 0x9d, 0xb7, 0x61, 0x5c, 0x5f, 0x63, 0x0f, 0xb5, 0xad, 0x77, 0xe0, 0x54, - 0xc6, 0x5a, 0x7a, 0xa8, 0x4d, 0xde, 0x85, 0x73, 0xb9, 0xeb, 0xe3, 0x61, 0x36, 0x6c, 0xff, 0x9c, - 0xa5, 0x9f, 0x83, 0x27, 0xa0, 0x73, 0x58, 0x36, 0x75, 0x0e, 0x17, 0x7a, 0xef, 0x9c, 0x1c, 0xc5, - 0xc3, 0x5b, 0x7a, 0xa7, 0xe9, 0xa9, 0x8e, 0x5e, 0x87, 0x91, 0x26, 0x2d, 0x91, 0x86, 0xaf, 0x76, - 0xff, 0x1d, 0x99, 0xf0, 0x52, 0xac, 0x3c, 0xc2, 0x82, 0x82, 0xfd, 0x4b, 0x16, 0x0c, 0x9d, 0xc0, - 0x48, 0x60, 0x73, 0x24, 0x9e, 0xcd, 0x25, 0x2d, 0x22, 0x0e, 0x2f, 0x60, 0xe7, 0xee, 0xca, 0xbd, - 0x98, 0xf8, 0x11, 0x7b, 0x2a, 0x66, 0x0e, 0xcc, 0xb7, 0xc0, 0xa9, 0x1b, 0x81, 0xe3, 0x2e, 0x39, - 0x4d, 0xc7, 0x6f, 0x90, 0xb0, 0xea, 0x6f, 0x1f, 0xc9, 0x02, 0xbb, 0xd0, 0xcf, 0x02, 0xdb, 0xde, - 0x01, 0xa4, 0x37, 0x20, 0x5c, 0x59, 0x30, 0x8c, 0x7a, 0xbc, 0x29, 0x31, 0xfc, 0x4f, 0x66, 0xb3, - 0x66, 0x5d, 0x3d, 0xd3, 0x9c, 0x34, 0x78, 0x01, 0x96, 0x84, 0xec, 0x57, 0x20, 0xd3, 0x1f, 0xbe, - 0xbf, 0xd8, 0xc0, 0xfe, 0x0c, 0xcc, 0xb0, 0x9a, 0x47, 0x7c, 0xd2, 0xda, 0x29, 0xa9, 0x64, 0x46, - 0xf0, 0x3b, 0xfb, 0x8b, 0x16, 0x4c, 0xad, 0xa7, 0x62, 0x82, 0x5d, 0x62, 0x0a, 0xd0, 0x0c, 0x61, - 0x78, 0x9d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x19, 0xd4, 0x9f, 0x5b, 0x90, 0x84, 0xa8, 0x38, 0x01, - 0xc6, 0x6b, 0xd9, 0x60, 0xbc, 0x32, 0x65, 0x23, 0xaa, 0x3b, 0x79, 0x7c, 0x17, 0xba, 0xae, 0xe2, - 0x31, 0xf5, 0x10, 0x8b, 0x24, 0x64, 0x78, 0xf4, 0x9e, 0x49, 0x33, 0x68, 0x93, 0x8c, 0xd0, 0x64, - 0xff, 0xe7, 0x02, 0x20, 0x85, 0x3b, 0x70, 0xbc, 0xa8, 0xee, 0x1a, 0xc7, 0x13, 0x2f, 0x6a, 0x0f, - 0x10, 0x53, 0xe1, 0x87, 0x8e, 0x1f, 0x71, 0xb2, 0x9e, 0x90, 0xba, 0x1d, 0xcd, 0x3e, 0x60, 0x4e, - 0x34, 0x89, 0x6e, 0x74, 0x51, 0xc3, 0x19, 0x2d, 0x68, 0xa6, 0x19, 0xc3, 0x83, 0x9a, 0x66, 0x8c, - 0xf4, 0x71, 0x57, 0xfb, 0x59, 0x0b, 0x26, 0xd4, 0x30, 0xbd, 0x4f, 0xec, 0xcf, 0x55, 0x7f, 0x72, - 0x8e, 0xbe, 0x9a, 0xd6, 0x65, 0x76, 0x25, 0x7c, 0x23, 0x73, 0x3b, 0x74, 0x9a, 0xde, 0xbb, 0x44, - 0x45, 0xeb, 0x9b, 0x17, 0x6e, 0x84, 0xa2, 0xf4, 0xf0, 0x60, 0x7e, 0x42, 0xfd, 0xe3, 0xd1, 0x81, - 0x93, 0x2a, 0xf6, 0x8f, 0xd3, 0xcd, 0x6e, 0x2e, 0x45, 0xf4, 0x12, 0x0c, 0xb7, 0x77, 0x9c, 0x88, - 0xa4, 0x9c, 0x6e, 0x86, 0x6b, 0xb4, 0xf0, 0xf0, 0x60, 0x7e, 0x52, 0x55, 0x60, 0x25, 0x98, 0x63, - 0x0f, 0x1e, 0x85, 0xab, 0x7b, 0x71, 0xf6, 0x8d, 0xc2, 0xf5, 0xc7, 0x16, 0x0c, 0xad, 0x07, 0xee, - 0x49, 0x1c, 0x01, 0xaf, 0x19, 0x47, 0xc0, 0x63, 0x79, 0x81, 0xdb, 0x73, 0x77, 0xff, 0x6a, 0x6a, - 0xf7, 0x5f, 0xc8, 0xa5, 0xd0, 0x7b, 0xe3, 0xb7, 0x60, 0x8c, 0x85, 0x83, 0x17, 0x0e, 0x46, 0x2f, - 0x18, 0x1b, 0x7e, 0x3e, 0xb5, 0xe1, 0xa7, 0x34, 0x54, 0x6d, 0xa7, 0x3f, 0x05, 0xa3, 0xc2, 0xc9, - 0x25, 0xed, 0xbd, 0x29, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x5a, 0x04, 0x23, 0xfc, 0x3c, 0xfa, 0x15, - 0x0b, 0x16, 0x42, 0x6e, 0xfc, 0xea, 0x56, 0x3a, 0xa1, 0xe7, 0x6f, 0xd7, 0x1b, 0x3b, 0xc4, 0xed, - 0x34, 0x3d, 0x7f, 0xbb, 0xba, 0xed, 0x07, 0xaa, 0x78, 0xe5, 0x1e, 0x69, 0x74, 0x98, 0xfa, 0xaa, - 0x4f, 0xac, 0x7b, 0x65, 0x44, 0xfe, 0xfc, 0xfd, 0x83, 0xf9, 0x05, 0x7c, 0x24, 0xda, 0xf8, 0x88, - 0x7d, 0x41, 0xbf, 0x69, 0xc1, 0x15, 0x1e, 0x95, 0x7d, 0xf0, 0xfe, 0xf7, 0x78, 0xe7, 0xd6, 0x24, - 0xa9, 0x84, 0xc8, 0x06, 0x09, 0x5b, 0x4b, 
0x2f, 0x8b, 0x01, 0xbd, 0x52, 0x3b, 0x5a, 0x5b, 0xf8, - 0xa8, 0x9d, 0xb3, 0xff, 0x45, 0x11, 0x26, 0x44, 0x68, 0x27, 0x71, 0x07, 0xbc, 0x64, 0x2c, 0x89, - 0xc7, 0x53, 0x4b, 0x62, 0xc6, 0x40, 0x3e, 0x9e, 0xe3, 0x3f, 0x82, 0x19, 0x7a, 0x38, 0x5f, 0x23, - 0x4e, 0x18, 0x6f, 0x12, 0x87, 0x5b, 0x5c, 0x15, 0x8f, 0x7c, 0xfa, 0x2b, 0xc1, 0xda, 0x8d, 0x34, - 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xce, 0xf1, 0x61, 0xba, 0x2b, 0x3a, 0xd7, 0x9b, 0x50, 0x56, - 0x1e, 0x1a, 0xe2, 0xd0, 0xe9, 0x1d, 0xe4, 0x2e, 0x4d, 0x81, 0x0b, 0xbf, 0x12, 0xef, 0xa0, 0x84, - 0x9c, 0xfd, 0x0f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7, 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, - 0x57, 0xec, 0xd8, 0x0f, 0xe7, 0xed, 0x58, 0xa3, 0x19, 0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, - 0x03, 0x5d, 0xe3, 0x76, 0x6d, 0x7b, 0xf2, 0xa5, 0x36, 0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, - 0x8b, 0xfa, 0xe8, 0xb3, 0xdc, 0xf0, 0xf0, 0xba, 0x1f, 0xdc, 0xf5, 0xaf, 0x06, 0x81, 0x0c, 0x9f, - 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63, 0x93, 0xda, 0x60, 0x11, 0x2c, 0xbf, 0x15, - 0x4e, 0x51, 0xd2, 0xa6, 0x77, 0x73, 0x84, 0x08, 0x4c, 0x89, 0xb8, 0x61, 0xb2, 0x4c, 0x8c, 0x5d, - 0xe6, 0x23, 0xcc, 0xac, 0x9d, 0x48, 0x80, 0xaf, 0x9b, 0x24, 0x70, 0x9a, 0xa6, 0xfd, 0x13, 0x16, - 0x30, 0x4f, 0xcf, 0x13, 0xe0, 0x47, 0x3e, 0x65, 0xf2, 0x23, 0xb3, 0x79, 0x83, 0x9c, 0xc3, 0x8a, - 0xbc, 0xc8, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xed, 0x0b, 0xa3, 0x8f, 0xfe, 0xef, 0x0f, 0xfb, 0xff, - 0x58, 0xfc, 0x10, 0x53, 0xfe, 0x13, 0xe8, 0xdb, 0xa0, 0xd4, 0x70, 0xda, 0x4e, 0x83, 0xe7, 0x4a, - 0xc9, 0x95, 0xc5, 0x19, 0x95, 0x16, 0x96, 0x45, 0x0d, 0x2e, 0x5b, 0x92, 0xf1, 0xe7, 0x4a, 0xb2, - 0xb8, 0xaf, 0x3c, 0x49, 0x35, 0x39, 0xb7, 0x0b, 0x13, 0x06, 0xb1, 0x87, 0x2a, 0x88, 0xf8, 0x36, - 0x7e, 0xc5, 0xaa, 0x78, 0x89, 0x2d, 0x98, 0xf1, 0xb5, 0xff, 0xf4, 0x42, 0x91, 0x8f, 0xcb, 0x0f, - 0xf7, 0xbb, 0x44, 0xd9, 0xed, 0xa3, 0xf9, 0x9d, 0xa6, 0xc8, 0xe0, 0x6e, 0xca, 0xf6, 0x8f, 0x59, - 0xf0, 0x88, 0x8e, 0xa8, 0xb9, 0xb6, 0xf4, 0x93, 0xee, 0x57, 0xa0, 0x14, 0xb4, 0x49, 0xe8, 0xc4, - 0x41, 0x28, 0x6e, 0x8d, 0xcb, 0x72, 0xd0, 0x6f, 0x8a, 0xf2, 0x43, 0x11, 0x69, 0x5c, 0x52, 0x97, - 0xe5, 0x58, 0xd5, 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0x91, 0x70, 0x62, 0x62, 0x67, 0x00, 0x53, 0x74, - 0x47, 0x58, 0x40, 0xec, 0xaf, 0x5a, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0xef, 0xc0, 0x74, 0xcb, 0x89, - 0x1b, 0x3b, 0x2b, 0xf7, 0xda, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xba, 0xdf, 0x38, 0x69, 0x1f, - 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2, 0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, - 0x8b, 0x7a, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, - 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc1, 0x68, 0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, - 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x86, 0x92, 0xf8, 0x29, 0x75, 0x5b, 0xec, - 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x3c, 0x40, 0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0x05, 0x81, - 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0x26, 0x3a, 0x7e, 0xc4, 0xd9, - 0x11, 0x67, 0x53, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x74, 0x20, 0x36, 0x71, 0xd1, 0x22, - 0x8c, 0xc4, 0x0e, 0x33, 0x5b, 0x19, 0xce, 0xb7, 0xb7, 0xdd, 0xa0, 0x18, 0x7a, 0x5a, 0x0e, 0x5a, - 0x01, 0x8b, 0x8a, 0xe8, 0x4d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8, 0x3e, 0xd8, 0x25, 0xa0, - 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x15, 0x80, 0xdc, 0x8b, 0x49, 0xe8, 0x3b, 0x4d, - 0x65, 0x15, 0xa6, 0xec, 0xa0, 0x2b, 0xc1, 0x7a, 0x10, 0xdf, 0x8a, 0xc8, 0xb7, 0xac, 0x28, 0x14, - 0xac, 0xa1, 0xdb, 0xbf, 0x59, 0x06, 0x48, 0x18, 0x77, 0xf4, 0x6e, 
0xd7, 0xc9, 0xf5, 0x4c, 0x6f, - 0x56, 0xff, 0xf8, 0x8e, 0x2d, 0xf4, 0xdd, 0x16, 0x8c, 0x39, 0xcd, 0x66, 0xd0, 0x70, 0x62, 0x36, - 0x45, 0x85, 0xde, 0x27, 0xa7, 0x68, 0x7f, 0x31, 0xa9, 0xc1, 0xbb, 0xf0, 0x82, 0x5c, 0xa2, 0x1a, - 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0xc7, 0xe4, 0x5b, 0x91, 0xaf, 0xad, 0xb9, 0xf4, 0x5b, 0xb1, - 0xcc, 0x2e, 0x09, 0xfd, 0x99, 0x78, 0xcb, 0x78, 0x26, 0x0e, 0xe5, 0x3b, 0x03, 0x1a, 0xfc, 0x6b, - 0xbf, 0x17, 0x22, 0xaa, 0xe9, 0x81, 0x01, 0x86, 0xf3, 0x3d, 0xef, 0xb4, 0x87, 0x52, 0x9f, 0xa0, - 0x00, 0x6f, 0xc3, 0x94, 0x6b, 0x72, 0x01, 0x62, 0x29, 0x3e, 0x99, 0x47, 0x37, 0xc5, 0x34, 0x24, - 0xf7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x07, 0x7d, 0xa8, 0xfa, 0x5b, 0x81, 0xf0, 0xb6, - 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xd7, 0xfb, 0xba, 0xa8, 0x8b, 0x15, - 0x15, 0xf4, 0x3a, 0x8c, 0x30, 0xd7, 0xab, 0x68, 0xb6, 0x94, 0x2f, 0x2c, 0x36, 0xa3, 0x98, 0x25, - 0x3b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x49, 0xc7, 0xc6, 0xa8, 0xea, 0xdf, 0x8a, 0x08, - 0x73, 0x6c, 0x2c, 0x2f, 0x7d, 0x38, 0xf1, 0x59, 0xe4, 0xe5, 0x99, 0xd9, 0xbb, 0x8c, 0x9a, 0x94, - 0x8d, 0x12, 0xff, 0x65, 0x52, 0xb0, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x38, 0x2c, 0x19, 0xce, 0xdb, - 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x59, 0x52, 0xbe, 0xed, 0x85, 0xbf, 0x46, 0xbf, 0xc3, 0x83, 0xbf, - 0xc4, 0xd9, 0x75, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xfc, 0xc1, 0x9c, 0x0f, 0xd3, 0xe9, 0x2d, - 0xfa, 0x50, 0xf9, 0x91, 0xdf, 0x1f, 0x82, 0x49, 0x73, 0x49, 0xa1, 0x2b, 0x50, 0x16, 0x44, 0x54, - 0x20, 0x7f, 0xb5, 0x4b, 0xd6, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7f, 0x03, 0xab, 0xae, 0xd9, 0xd9, - 0x26, 0xf9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x65, 0xb5, 0x19, 0x04, 0xb1, 0xba, 0x91, 0xd4, - 0xba, 0x5b, 0x62, 0xa5, 0x58, 0x40, 0xe9, 0x4d, 0xb4, 0x4b, 0x42, 0x9f, 0x34, 0xcd, 0xf8, 0xc0, - 0xea, 0x26, 0xba, 0xae, 0x03, 0xb1, 0x89, 0x4b, 0xef, 0xd3, 0x20, 0x62, 0x0b, 0x59, 0xbc, 0xdf, - 0x12, 0xbb, 0xe5, 0x3a, 0xf7, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0xc5, 0x40, 0xc2, 0x5c, - 0x11, 0x21, 0x5b, 0x1c, 0x31, 0xc4, 0x2d, 0x8f, 0x2c, 0x67, 0xa3, 0xe1, 0xbc, 0xfa, 0xe8, 0x35, - 0x98, 0x14, 0x3c, 0xbe, 0xa4, 0x38, 0x6a, 0xda, 0xc6, 0x5c, 0x37, 0xa0, 0x38, 0x85, 0x2d, 0x23, - 0x1c, 0x33, 0x36, 0x5b, 0x52, 0x28, 0x75, 0x47, 0x38, 0xd6, 0xe1, 0xb8, 0xab, 0x06, 0x5a, 0x84, - 0x29, 0xce, 0x84, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4e, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, - 0xd3, 0xf8, 0xe8, 0x15, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09, 0xb9, 0x9f, - 0x95, 0x66, 0x5c, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0b, 0xa7, 0x32, 0x82, 0x2e, 0xd0, - 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x0b, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 0x8d, 0x86, 0x45, - 0x57, 0x27, 0x0b, 0xce, 0xa0, 0xe5, 0x00, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82, 0x63, 0xff, - 0xcf, 0x02, 0x4c, 0x65, 0x28, 0x57, 0x58, 0x1e, 0xba, 0xd4, 0x2b, 0x25, 0x49, 0x3b, 0x67, 0x06, - 0xcc, 0x2e, 0x1c, 0x21, 0x60, 0x76, 0xb1, 0x5f, 0xc0, 0xec, 0xa1, 0xf7, 0x12, 0x30, 0xdb, 0x1c, - 0xb1, 0xe1, 0x81, 0x46, 0x2c, 0x23, 0xc8, 0xf6, 0xc8, 0x11, 0x83, 0x6c, 0x1b, 0x83, 0x3e, 0x3a, - 0xc0, 0xa0, 0xff, 0x60, 0x01, 0xa6, 0xd3, 0x46, 0x90, 0x27, 0x20, 0xb8, 0x7d, 0xdd, 0x10, 0xdc, - 0x66, 0x67, 0x75, 0x4c, 0x9b, 0x66, 0xe6, 0x09, 0x71, 0x71, 0x4a, 0x88, 0xfb, 0xd1, 0x81, 0xa8, - 0xf5, 0x16, 0xe8, 0xfe, 0x9d, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6e, 0x3a, 0x5e, 0xeb, 0x04, 0xc6, - 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0x0e, 0xf2, 0x35, 0xac, 0x6b, 0xb9, 0x03, 0x74, 0x27, 0x35, 0x40, - 0x57, 0x06, 0x27, 0xd9, 0x7b, 0x94, 0xbe, 0x52, 0x84, 0x0b, 0x99, 0xf5, 0x12, 0xb9, 0xe7, 
-	[remaining bytes of the previous gzipped FileDescriptorProto array, ending 0xff, 0xff, 0x4e, 0x11, 0xe2, 0x4d, 0x14, 0xfc, 0x00, 0x00 — generated data elided]
+	// 13620 bytes of a gzipped FileDescriptorProto
+	[new gzipped FileDescriptorProto byte array, beginning 0x1f, 0x8b, 0x08 — generated data elided, continues below]
0xca, 0xbf, 0x4a, 0x3a, 0x58, 0x5a, 0x39, 0x0e, + 0x96, 0xbd, 0xb9, 0x0f, 0x69, 0xa2, 0x56, 0xcc, 0x34, 0x51, 0xfb, 0xeb, 0x16, 0x24, 0x3a, 0x15, + 0x74, 0x1d, 0xca, 0xad, 0x80, 0x59, 0x5e, 0x86, 0xd2, 0x9c, 0xf9, 0xd1, 0xcc, 0x8b, 0x8a, 0x5f, + 0x8a, 0xfc, 0xe3, 0x6b, 0xb2, 0x06, 0x4e, 0x2a, 0xa3, 0x05, 0x18, 0x6e, 0x85, 0xa4, 0x1e, 0xb3, + 0xb0, 0x22, 0x3d, 0xe9, 0xf0, 0x35, 0xc2, 0xf1, 0xb1, 0xac, 0x68, 0xff, 0x82, 0x05, 0xc0, 0xad, + 0xc0, 0x1c, 0x7f, 0x93, 0x9c, 0x80, 0xb8, 0xbb, 0x02, 0x03, 0x51, 0x8b, 0x34, 0xba, 0xd9, 0xc4, + 0x26, 0xfd, 0xa9, 0xb7, 0x48, 0x23, 0x19, 0x70, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x77, 0x03, 0x8c, + 0x27, 0x68, 0xd5, 0x98, 0xec, 0xa0, 0x67, 0x8c, 0x30, 0x03, 0xe7, 0x52, 0x61, 0x06, 0xca, 0x0c, + 0x5b, 0x93, 0xac, 0xbe, 0x03, 0xc5, 0x1d, 0xe7, 0x9e, 0x10, 0x9d, 0x3d, 0xd5, 0xbd, 0x1b, 0x94, + 0xfe, 0xdc, 0x8a, 0x73, 0x8f, 0x3f, 0x12, 0x9f, 0x92, 0x0b, 0x64, 0xc5, 0xb9, 0x77, 0xc8, 0x2d, + 0x5f, 0xd9, 0x21, 0x75, 0xd3, 0x8b, 0xe2, 0x2f, 0xfc, 0xe7, 0xe4, 0x3f, 0x5b, 0x76, 0xb4, 0x11, + 0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0xbe, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e, 0xdf, 0x47, 0x5b, + 0x9e, 0x8f, 0xde, 0x83, 0x61, 0x61, 0x7f, 0x28, 0xc2, 0xfa, 0x5c, 0xed, 0xa3, 0x3d, 0x61, 0xbe, + 0xc8, 0xdb, 0xbc, 0x2a, 0x1f, 0xc1, 0xa2, 0xb4, 0x67, 0xbb, 0xb2, 0x41, 0xf4, 0x57, 0x2c, 0x18, + 0x17, 0xbf, 0x31, 0x79, 0xb7, 0x4d, 0xa2, 0x58, 0xf0, 0x9e, 0x9f, 0xec, 0xbf, 0x0f, 0xa2, 0x22, + 0xef, 0xca, 0x27, 0xe5, 0x31, 0x6b, 0x02, 0x7b, 0xf6, 0x28, 0xd5, 0x0b, 0xf4, 0xf7, 0x2c, 0x38, + 0xbd, 0xe3, 0xdc, 0xe3, 0x2d, 0xf2, 0x32, 0xec, 0xc4, 0x5e, 0x20, 0x54, 0xff, 0xaf, 0xf5, 0x37, + 0xfd, 0x1d, 0xd5, 0x79, 0x27, 0xa5, 0x7e, 0xf2, 0x74, 0x16, 0x4a, 0xcf, 0xae, 0x66, 0xf6, 0x6b, + 0x66, 0x03, 0x4a, 0x72, 0xbd, 0x65, 0x88, 0x1a, 0x2a, 0x3a, 0x63, 0x7d, 0x64, 0xf3, 0x4f, 0xdd, + 0xd7, 0x9f, 0xb6, 0x23, 0xd6, 0xda, 0x43, 0x6d, 0xe7, 0x1d, 0x18, 0xd5, 0xd7, 0xd8, 0x43, 0x6d, + 0xeb, 0x5d, 0x38, 0x95, 0xb1, 0x96, 0x1e, 0x6a, 0x93, 0x7b, 0x70, 0x2e, 0x77, 0x7d, 0x3c, 0xcc, + 0x86, 0xed, 0x9f, 0xb7, 0xf4, 0x73, 0xf0, 0x04, 0x74, 0x0e, 0x8b, 0xa6, 0xce, 0xe1, 0x62, 0xf7, + 0x9d, 0x93, 0xa3, 0x78, 0x78, 0x5b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0x1b, 0x30, 0xd4, 0xa4, 0x25, + 0xd2, 0xf0, 0xd5, 0xee, 0xbd, 0x23, 0x13, 0x5e, 0x8a, 0x95, 0x47, 0x58, 0x50, 0xb0, 0x7f, 0xd9, + 0x82, 0x81, 0x13, 0x18, 0x09, 0x6c, 0x8e, 0xc4, 0x33, 0xb9, 0xa4, 0x45, 0xc4, 0xe1, 0x39, 0xec, + 0xec, 0x2d, 0xdd, 0x8b, 0x89, 0x1f, 0xb1, 0xa7, 0x62, 0xe6, 0xc0, 0xfc, 0x5f, 0x70, 0xea, 0x66, + 0xe0, 0xb8, 0x0b, 0x4e, 0xd3, 0xf1, 0x1b, 0x24, 0xac, 0xfa, 0x9b, 0x47, 0xb2, 0xc0, 0x2e, 0xf4, + 0xb2, 0xc0, 0xb6, 0xb7, 0x00, 0xe9, 0x0d, 0x08, 0x57, 0x16, 0x0c, 0xc3, 0x1e, 0x6f, 0x4a, 0x0c, + 0xff, 0x13, 0xd9, 0xac, 0x59, 0x47, 0xcf, 0x34, 0x27, 0x0d, 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x65, + 0xc8, 0xf4, 0x87, 0xef, 0x2d, 0x36, 0xb0, 0x3f, 0x03, 0x53, 0xac, 0xe6, 0x11, 0x9f, 0xb4, 0x76, + 0x4a, 0x2a, 0x99, 0x11, 0xfc, 0xce, 0xfe, 0xa2, 0x05, 0x13, 0xab, 0xa9, 0x98, 0x60, 0x97, 0x99, + 0x02, 0x34, 0x43, 0x18, 0x5e, 0x67, 0xa5, 0x58, 0x40, 0x8f, 0x5d, 0x06, 0xf5, 0xe7, 0x16, 0x24, + 0x21, 0x2a, 0x4e, 0x80, 0xf1, 0x5a, 0x34, 0x18, 0xaf, 0x4c, 0xd9, 0x88, 0xea, 0x4e, 0x1e, 0xdf, + 0x85, 0x6e, 0xa8, 0x78, 0x4c, 0x5d, 0xc4, 0x22, 0x09, 0x19, 0x1e, 0xbd, 0x67, 0xdc, 0x0c, 0xda, + 0x24, 0x23, 0x34, 0xd9, 0xff, 0xb1, 0x00, 0x48, 0xe1, 0xf6, 0x1d, 0x2f, 0xaa, 0xb3, 0xc6, 0xf1, + 0xc4, 0x8b, 0xda, 0x05, 0xc4, 0x54, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac, 0x27, 0xa4, 0x6e, 0x47, + 0xb3, 0x0f, 0x98, 0x11, 0x4d, 0xa2, 0x9b, 0x1d, 0xd4, 0x70, 0x46, 0x0b, 
0x9a, 0x69, 0xc6, 0x60, + 0xbf, 0xa6, 0x19, 0x43, 0x3d, 0xdc, 0xd5, 0x7e, 0xce, 0x82, 0x31, 0x35, 0x4c, 0x1f, 0x10, 0xfb, + 0x73, 0xd5, 0x9f, 0x9c, 0xa3, 0xaf, 0xa6, 0x75, 0x99, 0x5d, 0x09, 0xdf, 0xcc, 0xdc, 0x0e, 0x9d, + 0xa6, 0xf7, 0x1e, 0x51, 0xd1, 0xfa, 0x66, 0x85, 0x1b, 0xa1, 0x28, 0x3d, 0x3c, 0x98, 0x1d, 0x53, + 0xff, 0x78, 0x74, 0xe0, 0xa4, 0x8a, 0xfd, 0x13, 0x74, 0xb3, 0x9b, 0x4b, 0x11, 0xbd, 0x08, 0x83, + 0xad, 0x2d, 0x27, 0x22, 0x29, 0xa7, 0x9b, 0xc1, 0x1a, 0x2d, 0x3c, 0x3c, 0x98, 0x1d, 0x57, 0x15, + 0x58, 0x09, 0xe6, 0xd8, 0xfd, 0x47, 0xe1, 0xea, 0x5c, 0x9c, 0x3d, 0xa3, 0x70, 0xfd, 0xb1, 0x05, + 0x03, 0xab, 0x81, 0x7b, 0x12, 0x47, 0xc0, 0xeb, 0xc6, 0x11, 0x70, 0x3e, 0x2f, 0x70, 0x7b, 0xee, + 0xee, 0x5f, 0x4e, 0xed, 0xfe, 0x8b, 0xb9, 0x14, 0xba, 0x6f, 0xfc, 0x1d, 0x18, 0x61, 0xe1, 0xe0, + 0x85, 0x83, 0xd1, 0xf3, 0xc6, 0x86, 0x9f, 0x4d, 0x6d, 0xf8, 0x09, 0x0d, 0x55, 0xdb, 0xe9, 0x4f, + 0xc2, 0xb0, 0x70, 0x72, 0x49, 0x7b, 0x6f, 0x0a, 0x5c, 0x2c, 0xe1, 0xf6, 0x8f, 0x15, 0xc1, 0x08, + 0x3f, 0x8f, 0x7e, 0xc5, 0x82, 0xb9, 0x90, 0x1b, 0xbf, 0xba, 0x95, 0x76, 0xe8, 0xf9, 0x9b, 0xf5, + 0xc6, 0x16, 0x71, 0xdb, 0x4d, 0xcf, 0xdf, 0xac, 0x6e, 0xfa, 0x81, 0x2a, 0x5e, 0xba, 0x47, 0x1a, + 0x6d, 0xa6, 0xbe, 0xea, 0x11, 0xeb, 0x5e, 0x19, 0x91, 0x3f, 0x77, 0xff, 0x60, 0x76, 0x0e, 0x1f, + 0x89, 0x36, 0x3e, 0x62, 0x5f, 0xd0, 0x6f, 0x5a, 0x70, 0x95, 0x47, 0x65, 0xef, 0xbf, 0xff, 0x5d, + 0xde, 0xb9, 0x35, 0x49, 0x2a, 0x21, 0xb2, 0x46, 0xc2, 0x9d, 0x85, 0x97, 0xc4, 0x80, 0x5e, 0xad, + 0x1d, 0xad, 0x2d, 0x7c, 0xd4, 0xce, 0xd9, 0xff, 0xac, 0x08, 0x63, 0x22, 0xb4, 0x93, 0xb8, 0x03, + 0x5e, 0x34, 0x96, 0xc4, 0x63, 0xa9, 0x25, 0x31, 0x65, 0x20, 0x1f, 0xcf, 0xf1, 0x1f, 0xc1, 0x14, + 0x3d, 0x9c, 0xaf, 0x13, 0x27, 0x8c, 0xd7, 0x89, 0xc3, 0x2d, 0xae, 0x8a, 0x47, 0x3e, 0xfd, 0x95, + 0x60, 0xed, 0x66, 0x9a, 0x18, 0xee, 0xa4, 0xff, 0x8d, 0x74, 0xe7, 0xf8, 0x30, 0xd9, 0x11, 0x9d, + 0xeb, 0x2d, 0x28, 0x2b, 0x0f, 0x0d, 0x71, 0xe8, 0x74, 0x0f, 0x72, 0x97, 0xa6, 0xc0, 0x85, 0x5f, + 0x89, 0x77, 0x50, 0x42, 0xce, 0xfe, 0xfb, 0x05, 0xa3, 0x41, 0x3e, 0x89, 0xab, 0x50, 0x72, 0xa2, + 0xc8, 0xdb, 0xf4, 0x89, 0x2b, 0x76, 0xec, 0x47, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0xf3, 0x92, 0x99, + 0x17, 0x35, 0xb1, 0xa2, 0x81, 0xae, 0x73, 0xbb, 0xb6, 0x5d, 0xf9, 0x52, 0xeb, 0x8f, 0x1a, 0x48, + 0xcb, 0xb7, 0x5d, 0x82, 0x45, 0x7d, 0xf4, 0x59, 0x6e, 0x78, 0x78, 0xc3, 0x0f, 0xf6, 0xfc, 0x6b, + 0x41, 0x20, 0xc3, 0x27, 0xf4, 0x47, 0x70, 0x4a, 0x9a, 0x1b, 0xaa, 0xea, 0xd8, 0xa4, 0xd6, 0x5f, + 0x04, 0xcb, 0x6f, 0x83, 0x53, 0x94, 0xb4, 0xe9, 0xdd, 0x1c, 0x21, 0x02, 0x13, 0x22, 0x6e, 0x98, + 0x2c, 0x13, 0x63, 0x97, 0xf9, 0x08, 0x33, 0x6b, 0x27, 0x12, 0xe0, 0x1b, 0x26, 0x09, 0x9c, 0xa6, + 0x69, 0xff, 0xa4, 0x05, 0xcc, 0xd3, 0xf3, 0x04, 0xf8, 0x91, 0x4f, 0x99, 0xfc, 0xc8, 0x74, 0xde, + 0x20, 0xe7, 0xb0, 0x22, 0x2f, 0xf0, 0x95, 0x55, 0x0b, 0x83, 0x7b, 0xfb, 0xc2, 0xe8, 0xa3, 0xf7, + 0xfb, 0xc3, 0xfe, 0x5f, 0x16, 0x3f, 0xc4, 0x94, 0xff, 0x04, 0xfa, 0x76, 0x28, 0x35, 0x9c, 0x96, + 0xd3, 0xe0, 0xb9, 0x52, 0x72, 0x65, 0x71, 0x46, 0xa5, 0xb9, 0x45, 0x51, 0x83, 0xcb, 0x96, 0x64, + 0xfc, 0xb9, 0x92, 0x2c, 0xee, 0x29, 0x4f, 0x52, 0x4d, 0xce, 0x6c, 0xc3, 0x98, 0x41, 0xec, 0xa1, + 0x0a, 0x22, 0xbe, 0x9d, 0x5f, 0xb1, 0x2a, 0x5e, 0xe2, 0x0e, 0x4c, 0xf9, 0xda, 0x7f, 0x7a, 0xa1, + 0xc8, 0xc7, 0xe5, 0x47, 0x7b, 0x5d, 0xa2, 0xec, 0xf6, 0xd1, 0xfc, 0x4e, 0x53, 0x64, 0x70, 0x27, + 0x65, 0xfb, 0xc7, 0x2d, 0x78, 0x44, 0x47, 0xd4, 0x5c, 0x5b, 0x7a, 0x49, 0xf7, 0x2b, 0x50, 0x0a, + 0x5a, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xb7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44, 0xf9, 0xa1, 0x88, + 
0x34, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xd7, 0x27, 0x1b, 0x8c, 0x48, 0x38, 0x31, 0xb1, + 0x33, 0x80, 0x29, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0xd7, 0x2c, 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0x77, + 0x61, 0x72, 0xc7, 0x89, 0x1b, 0x5b, 0x4b, 0xf7, 0x5a, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xaa, + 0xd7, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa4, 0x88, 0xe1, 0x0e, 0xf2, 0x68, 0x1d, 0x46, + 0x58, 0x19, 0xf3, 0xcf, 0x8b, 0xba, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0x95, 0x84, 0x0e, + 0xd6, 0x89, 0xda, 0x3f, 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc2, 0x70, 0x2b, 0x70, 0x17, + 0xab, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x81, 0x92, 0xf8, + 0x29, 0x75, 0x5b, 0xec, 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x1c, 0x40, 0x2b, 0x0c, 0x76, + 0x3d, 0x97, 0x05, 0x81, 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0xc6, + 0xda, 0x7e, 0xc4, 0xd9, 0x11, 0x67, 0x5d, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x75, 0x20, + 0x36, 0x71, 0xd1, 0x3c, 0x0c, 0xc5, 0x0e, 0x33, 0x5b, 0x19, 0xcc, 0xb7, 0xb7, 0x5d, 0xa3, 0x18, + 0x7a, 0x5a, 0x0e, 0x5a, 0x01, 0x8b, 0x8a, 0xe8, 0x2d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8, + 0xde, 0xdf, 0x25, 0xa0, 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x05, 0x80, 0xdc, 0x8b, + 0x49, 0xe8, 0x3b, 0x4d, 0x65, 0x15, 0xa6, 0xf8, 0x82, 0x4a, 0xb0, 0x1a, 0xc4, 0xb7, 0x23, 0xb2, + 0xa4, 0x30, 0xb0, 0x86, 0x6d, 0xff, 0x66, 0x19, 0x20, 0xe1, 0xdb, 0xd1, 0x7b, 0x1d, 0x07, 0xd7, + 0xd3, 0xdd, 0x39, 0xfd, 0xe3, 0x3b, 0xb5, 0xd0, 0xf7, 0x58, 0x30, 0xe2, 0x34, 0x9b, 0x41, 0xc3, + 0x89, 0xd9, 0x0c, 0x15, 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xf9, 0xa4, 0x06, 0xef, 0xc2, 0xf3, 0x72, + 0x85, 0x6a, 0x90, 0x9e, 0xbd, 0xd0, 0x1b, 0x46, 0x9f, 0x90, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea, + 0xa9, 0x58, 0x66, 0x77, 0x84, 0xfe, 0x4a, 0xbc, 0x6d, 0xbc, 0x12, 0x07, 0xf2, 0x7d, 0x01, 0x0d, + 0xf6, 0xb5, 0xd7, 0x03, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0x83, 0xf9, 0x8e, 0x77, 0xda, 0x3b, 0xa9, + 0x47, 0x4c, 0x80, 0x77, 0x60, 0xc2, 0x35, 0x99, 0x00, 0xb1, 0x12, 0x9f, 0xc8, 0xa3, 0x9b, 0xe2, + 0x19, 0x92, 0x6b, 0x3f, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xc6, 0x63, 0x3e, 0x54, 0xfd, 0x8d, 0x40, + 0x38, 0x5b, 0xd8, 0xb9, 0x73, 0xb9, 0x1f, 0xc5, 0x64, 0x87, 0x62, 0x26, 0xb7, 0xfb, 0xaa, 0xa8, + 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x0c, 0x31, 0xcf, 0xab, 0x68, 0xba, 0x94, 0x2f, 0x2b, 0x36, 0x83, + 0x98, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf, + 0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7c, 0x34, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xbb, 0x8c, + 0x9a, 0x94, 0x8b, 0x12, 0xff, 0x65, 0x4e, 0xb0, 0x69, 0xc8, 0xef, 0x9e, 0x99, 0x37, 0x2c, 0x19, + 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x39, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0xaf, 0xb3, + 0x83, 0x3f, 0xc4, 0xd9, 0x6d, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xec, 0xc1, 0x8c, 0x0f, 0x93, + 0xe9, 0x2d, 0xfa, 0x50, 0xd9, 0x91, 0xdf, 0x1f, 0x80, 0x71, 0x73, 0x49, 0xa1, 0xab, 0x50, 0x16, + 0x44, 0x54, 0x1c, 0x7f, 0xb5, 0x4b, 0x56, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7d, 0x03, 0xab, 0xae, + 0x99, 0xd9, 0x26, 0xe9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x61, 0xb5, 0x1e, 0x04, 0xb1, 0xba, + 0x90, 0xd4, 0xba, 0x5b, 0x60, 0xa5, 0x58, 0x40, 0xe9, 0x45, 0xb4, 0x4d, 0x42, 0x9f, 0x34, 0xcd, + 0xf0, 0xc0, 0xea, 0x22, 0xba, 0xa1, 0x03, 0xb1, 0x89, 0x4b, 0xaf, 0xd3, 0x20, 0x62, 0x0b, 0x59, + 0x3c, 0xdf, 0x12, 0xb3, 0xe5, 0x3a, 0x77, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0x85, 0x40, + 0xc2, 0x5c, 0x0f, 0x21, 0x5b, 0x1c, 0x32, 0xa4, 0x2d, 0x8f, 0x2c, 0x66, 0xa3, 0xe1, 0xbc, 0xfa, + 0xe8, 0x75, 0x18, 0x17, 
0x2c, 0xbe, 0xa4, 0x38, 0x6c, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85, + 0x2d, 0x03, 0x1c, 0x33, 0x2e, 0x5b, 0x52, 0x28, 0x75, 0x06, 0x38, 0xd6, 0xe1, 0xb8, 0xa3, 0x06, + 0x9a, 0x87, 0x09, 0xce, 0x83, 0x79, 0xfe, 0x26, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d, + 0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x65, 0x18, 0x75, 0xc2, 0xc6, 0x96, 0x17, 0x93, 0x46, 0xdc, 0x0e, + 0xb9, 0x9b, 0x95, 0x66, 0x5b, 0x34, 0xaf, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x07, 0xa7, 0x32, 0x62, + 0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3c, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xf9, 0x5a, 0x55, 0x7e, 0x8d, + 0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x00, 0x54, 0xab, 0x73, 0x59, 0x02, 0x70, 0x82, + 0x63, 0xff, 0xf7, 0x02, 0x4c, 0x64, 0xe8, 0x56, 0x58, 0x1a, 0xba, 0xd4, 0x23, 0x25, 0xc9, 0x3a, + 0x67, 0xc6, 0xcb, 0x2e, 0x1c, 0x21, 0x5e, 0x76, 0xb1, 0x57, 0xbc, 0xec, 0x81, 0xf7, 0x13, 0x2f, + 0xdb, 0x1c, 0xb1, 0xc1, 0xbe, 0x46, 0x2c, 0x23, 0xc6, 0xf6, 0xd0, 0x11, 0x63, 0x6c, 0x1b, 0x83, + 0x3e, 0xdc, 0xc7, 0xa0, 0xff, 0x50, 0x01, 0x26, 0xd3, 0x36, 0x90, 0x27, 0x20, 0xb7, 0x7d, 0xc3, + 0x90, 0xdb, 0x66, 0x27, 0x75, 0x4c, 0x5b, 0x66, 0xe6, 0xc9, 0x70, 0x71, 0x4a, 0x86, 0xfb, 0xf1, + 0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xad, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6c, 0x3a, 0xde, 0xce, + 0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0xe9, 0xe7, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, + 0x6a, 0x80, 0xae, 0xf6, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x62, + 0xcf, 0x65, 0x43, 0xec, 0xf9, 0x5c, 0x4a, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1, + 0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa0, 0x0c, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x1b, + 0x49, 0xf6, 0xf9, 0xaf, 0x2c, 0x38, 0x97, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x35, 0x65, 0x5d, + 0x4f, 0xf6, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x3e, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b, + 0x46, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x4a, 0xe0, 0xaa, 0x90, 0xc0, 0xcf, 0xb0, 0x77, 0x56, 0x52, + 0x7c, 0x78, 0x30, 0x3b, 0x93, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x16, 0x4a, 0x91, 0xb8, + 0x37, 0xc5, 0xdc, 0x3f, 0xdf, 0xe7, 0xe0, 0x38, 0xeb, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x92, 0x0a, + 0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0xcf, 0x01, 0xec, 0xaa, 0xc7, 0x40, 0x5a, + 0xfe, 0xa0, 0x3d, 0x13, 0x34, 0x2c, 0xf4, 0x2d, 0x30, 0x19, 0xf1, 0xa0, 0x7e, 0x8b, 0x4d, 0x27, + 0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x60, 0xa3, 0x65, 0xd9, + 0x2a, 0x8b, 0x40, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x04, 0xf7, 0x74, 0x7a, 0xf8, 0xd9, + 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e, + 0x2a, 0x6e, 0xa6, 0x55, 0x2e, 0xf3, 0x4d, 0xad, 0x28, 0x22, 0x58, 0x23, 0x68, 0xff, 0xd0, 0x00, + 0x3c, 0xda, 0xe5, 0x8c, 0x44, 0xf3, 0xa6, 0x1e, 0xf6, 0xa9, 0xf4, 0xe3, 0x7a, 0x26, 0xb3, 0xb2, + 0xf1, 0xda, 0x4e, 0x2d, 0xc5, 0xc2, 0xfb, 0x5e, 0x8a, 0xdf, 0x6f, 0x69, 0x62, 0x0f, 0x6e, 0xab, + 0xf9, 0xa9, 0x23, 0x9e, 0xfd, 0xc7, 0x28, 0x07, 0xd9, 0xc8, 0x10, 0x26, 0x3c, 0xd7, 0x77, 0x77, + 0xfa, 0x96, 0x2e, 0x9c, 0xac, 0x94, 0xf8, 0x0b, 0x16, 0x3c, 0x96, 0xd9, 0x5f, 0xc3, 0x22, 0xe7, + 0x2a, 0x94, 0x1b, 0xb4, 0x50, 0x73, 0x45, 0x4c, 0x7c, 0xb4, 0x25, 0x00, 0x27, 0x38, 0x86, 0xe1, + 0x4d, 0xa1, 0xa7, 0xe1, 0xcd, 0x3f, 0xb5, 0xa0, 0x63, 0x7f, 0x9c, 0xc0, 0x41, 0x5d, 0x35, 0x0f, + 0xea, 0x8f, 0xf6, 0x33, 0x97, 0x39, 0x67, 0xf4, 0x1f, 0x4d, 0xc0, 0xd9, 0x1c, 0x57, 0x9c, 0x5d, + 0x98, 0xda, 0x6c, 0x10, 0xd3, 0xc9, 0x53, 0x7c, 0x4c, 0xa6, 0x3f, 0x6c, 0x57, 0x8f, 0x50, 0x96, + 0xd1, 0x72, 0xaa, 0x03, 0x05, 0x77, 0x36, 0x81, 
0xbe, 0x60, 0xc1, 0x69, 0x67, 0x2f, 0xea, 0x48, + 0x81, 0x2f, 0xd6, 0xcc, 0x0b, 0x99, 0x42, 0x90, 0x1e, 0x29, 0xf3, 0x79, 0x8a, 0xcf, 0x2c, 0x2c, + 0x9c, 0xd9, 0x16, 0xc2, 0x22, 0x48, 0x3c, 0x65, 0xe7, 0xbb, 0xb8, 0x21, 0x67, 0xf9, 0x4c, 0xf1, + 0x1b, 0x44, 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x4d, 0xe9, 0xc8, 0x98, 0x71, 0x43, 0x25, + 0x03, 0xd9, 0xdd, 0xbd, 0x93, 0x6b, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa1, 0xe8, 0x6f, + 0x44, 0xdd, 0xb2, 0x64, 0xa6, 0x4c, 0xd6, 0xb8, 0xb3, 0xff, 0xea, 0x72, 0x1d, 0xd3, 0x8a, 0xe8, + 0x3a, 0x14, 0xc3, 0x75, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x50, 0xc9, 0xe9, 0x15, 0xa3, + 0x84, 0x17, 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x64, 0xfe, 0x2b, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c, + 0xbb, 0xf8, 0x81, 0xf1, 0x88, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x0d, 0x86, 0x1a, 0x2c, 0xa3, + 0xa2, 0x88, 0x47, 0xf6, 0x89, 0x4c, 0x59, 0x5d, 0x97, 0x54, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0, + 0xa0, 0xc5, 0xa8, 0x92, 0xd6, 0xd6, 0x46, 0x24, 0x32, 0x00, 0x67, 0x53, 0xed, 0x92, 0x41, 0x55, + 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xa3, 0x21, 0x7c, 0x53, 0x32, 0x85, 0x76, + 0x66, 0xbc, 0x86, 0x85, 0xa1, 0xfb, 0x07, 0xb3, 0x85, 0xe5, 0x45, 0x5c, 0xd8, 0x68, 0xa0, 0x55, + 0x18, 0xde, 0xe0, 0x1e, 0xde, 0x42, 0x2e, 0xf7, 0x44, 0xb6, 0xf3, 0x79, 0x87, 0x13, 0x38, 0x77, + 0xcb, 0x10, 0x00, 0x2c, 0x89, 0xb0, 0x98, 0xeb, 0xca, 0x53, 0x5d, 0x84, 0xee, 0x9a, 0x3b, 0x5a, + 0x74, 0x01, 0x7e, 0x3f, 0x27, 0xfe, 0xee, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xb0, 0x8b, + 0x50, 0x2c, 0x99, 0xab, 0xba, 0x47, 0x86, 0x7a, 0xbe, 0xaa, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x6d, + 0x18, 0xdb, 0x8d, 0x5a, 0x5b, 0x44, 0x6e, 0x69, 0x16, 0x99, 0x25, 0xe7, 0x0a, 0xbb, 0x23, 0x10, + 0xbd, 0x30, 0x6e, 0x3b, 0xcd, 0x8e, 0x53, 0x88, 0xa9, 0xbf, 0xef, 0xe8, 0xc4, 0xb0, 0x49, 0x9b, + 0x0e, 0xff, 0xbb, 0xed, 0x60, 0x7d, 0x3f, 0x26, 0x22, 0xe2, 0x56, 0xe6, 0xf0, 0xbf, 0xc9, 0x51, + 0x3a, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x88, 0xe1, 0x61, 0xa7, 0xe7, 0x64, 0x7e, 0x58, + 0xcc, 0x79, 0x89, 0x94, 0x33, 0x28, 0xec, 0xb4, 0x4c, 0x48, 0xb1, 0x53, 0xb2, 0xb5, 0x15, 0xc4, + 0x81, 0x9f, 0x3a, 0xa1, 0xa7, 0xf2, 0x4f, 0xc9, 0x5a, 0x06, 0x7e, 0xe7, 0x29, 0x99, 0x85, 0x85, + 0x33, 0xdb, 0x42, 0x2e, 0x8c, 0xb7, 0x82, 0x30, 0xde, 0x0b, 0x42, 0xb9, 0xbe, 0x50, 0x17, 0xb9, + 0x82, 0x81, 0x29, 0x5a, 0x64, 0xc1, 0xec, 0x4c, 0x08, 0x4e, 0xd1, 0x44, 0x9f, 0x86, 0xe1, 0xa8, + 0xe1, 0x34, 0x49, 0xf5, 0xd6, 0xf4, 0xa9, 0xfc, 0xeb, 0xa7, 0xce, 0x51, 0x72, 0x56, 0x17, 0x0f, + 0xd0, 0xce, 0x51, 0xb0, 0x24, 0x87, 0x96, 0x61, 0x90, 0xe5, 0xd4, 0x62, 0xe1, 0xe1, 0x72, 0xa2, + 0x7b, 0x76, 0x18, 0x10, 0xf3, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0xc1, 0x5e, 0x07, + 0xd1, 0xf4, 0x99, 0xfc, 0x3d, 0x20, 0xb8, 0xf2, 0x5b, 0xf5, 0x6e, 0x7b, 0x40, 0x21, 0xe1, 0x84, + 0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xb6, 0x8b, 0xe5, 0x4b, 0xee, 0x59, 0xca, 0x4e, 0x66, 0x7a, + 0x92, 0x52, 0x12, 0xf6, 0xef, 0x0e, 0x77, 0xf2, 0x2c, 0xec, 0x41, 0xf6, 0x5d, 0x56, 0x87, 0xae, + 0xee, 0x93, 0xfd, 0xca, 0x87, 0x8e, 0x91, 0x5b, 0xfd, 0x82, 0x05, 0x67, 0x5b, 0x99, 0x1f, 0x22, + 0x18, 0x80, 0xfe, 0xc4, 0x4c, 0xfc, 0xd3, 0x55, 0x28, 0xc1, 0x6c, 0x38, 0xce, 0x69, 0x29, 0xfd, + 0x22, 0x28, 0xbe, 0xef, 0x17, 0xc1, 0x0a, 0x94, 0x18, 0x93, 0xd9, 0x23, 0xc3, 0x70, 0xfa, 0x61, + 0xc4, 0x58, 0x89, 0x45, 0x51, 0x11, 0x2b, 0x12, 0xe8, 0x07, 0x2c, 0xb8, 0x90, 0xee, 0x3a, 0x26, + 0x0c, 0x2c, 0xe2, 0x0f, 0xf2, 0xb7, 0xe0, 0xb2, 0xf8, 0xfe, 0x0b, 0xb5, 0x6e, 0xc8, 0x87, 0xbd, + 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x25, 0xe3, 0x31, 0x3a, 0x64, 0x0a, 0xe0, 
0xfb, 0x78, 0x90, 0xbe, + 0x00, 0xa3, 0x3b, 0x41, 0xdb, 0x8f, 0x85, 0xa1, 0x8c, 0x50, 0xda, 0x33, 0x65, 0xf5, 0x8a, 0x56, + 0x8e, 0x0d, 0xac, 0xd4, 0x33, 0xb6, 0xf4, 0xc0, 0xcf, 0xd8, 0xb7, 0x61, 0xd4, 0xd7, 0x2c, 0x3b, + 0x05, 0x3f, 0x70, 0x39, 0x3f, 0x76, 0xa8, 0x6e, 0x07, 0xca, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda, + 0xc9, 0xbe, 0x8d, 0x7e, 0xda, 0xca, 0x60, 0xea, 0xf9, 0x6b, 0xf9, 0x35, 0xf3, 0xb5, 0x7c, 0x39, + 0xfd, 0x5a, 0xee, 0x10, 0xbe, 0x1a, 0x0f, 0xe5, 0xfe, 0xf3, 0x9c, 0xf4, 0x1b, 0x26, 0xd0, 0x6e, + 0xc2, 0xa5, 0x5e, 0xd7, 0x12, 0xb3, 0x98, 0x72, 0x95, 0xaa, 0x2d, 0xb1, 0x98, 0x72, 0xab, 0x15, + 0xcc, 0x20, 0xfd, 0xc6, 0x91, 0xb1, 0xff, 0x9b, 0x05, 0xc5, 0x5a, 0xe0, 0x9e, 0x80, 0x30, 0xf9, + 0x53, 0x86, 0x30, 0xf9, 0xd1, 0xec, 0x0b, 0xd1, 0xcd, 0x15, 0x1d, 0x2f, 0xa5, 0x44, 0xc7, 0x17, + 0xf2, 0x08, 0x74, 0x17, 0x14, 0xff, 0x44, 0x11, 0x46, 0x6a, 0x81, 0xab, 0xcc, 0x95, 0x7f, 0xfd, + 0x41, 0xcc, 0x95, 0x73, 0x03, 0xfc, 0x6b, 0x94, 0x99, 0xa1, 0x95, 0xf4, 0xb1, 0xfc, 0x0b, 0x66, + 0xb5, 0x7c, 0x97, 0x78, 0x9b, 0x5b, 0x31, 0x71, 0xd3, 0x9f, 0x73, 0x72, 0x56, 0xcb, 0xff, 0xd5, + 0x82, 0x89, 0x54, 0xeb, 0xa8, 0x09, 0x63, 0x4d, 0x5d, 0x30, 0x29, 0xd6, 0xe9, 0x03, 0xc9, 0x34, + 0x85, 0xd5, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x0e, 0x40, 0x69, 0xea, 0xa4, 0x04, 0x8c, 0x71, + 0xfd, 0x4a, 0x95, 0x17, 0x61, 0x0d, 0x03, 0xbd, 0x08, 0x23, 0x71, 0xd0, 0x0a, 0x9a, 0xc1, 0xe6, + 0xfe, 0x0d, 0x22, 0x23, 0x17, 0x29, 0x5b, 0xae, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, 0xa7, 0x8a, + 0xfc, 0x43, 0xfd, 0xd8, 0xfb, 0x70, 0x4d, 0x7e, 0xb0, 0xd7, 0xe4, 0x57, 0x2d, 0x98, 0xa4, 0xad, + 0x33, 0x73, 0x11, 0x79, 0xd9, 0xaa, 0x98, 0xc1, 0x56, 0x97, 0x98, 0xc1, 0x97, 0xe9, 0xd9, 0xe5, + 0x06, 0xed, 0x58, 0x48, 0xd0, 0xb4, 0xc3, 0x89, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, + 0x8b, 0x9b, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, 0x0c, 0x29, 0x3c, 0x90, 0x1d, 0x52, 0x98, 0xc7, + 0x61, 0x14, 0x86, 0x05, 0x82, 0xed, 0xd1, 0xe2, 0x30, 0x4a, 0x8b, 0x83, 0x04, 0xc7, 0xfe, 0xf9, + 0x22, 0x8c, 0xd6, 0x02, 0x37, 0xd1, 0x95, 0xbd, 0x60, 0xe8, 0xca, 0x2e, 0xa5, 0x74, 0x65, 0x93, + 0x3a, 0xee, 0x87, 0x9a, 0xb1, 0xaf, 0x97, 0x66, 0xec, 0x9f, 0x58, 0x6c, 0xd6, 0x2a, 0xab, 0x75, + 0x6e, 0x7d, 0x84, 0x9e, 0x85, 0x11, 0x76, 0x20, 0x31, 0x9f, 0x4a, 0xa9, 0x40, 0x62, 0x29, 0x94, + 0x56, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x50, 0x8a, 0x88, 0x13, 0x36, 0xb6, 0xd4, 0x19, 0x27, + 0xb4, 0x3d, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x01, 0x58, 0xcc, 0xf7, 0xd1, 0xd2, 0xfb, + 0xc3, 0xb7, 0x48, 0x7e, 0xdc, 0x3f, 0xfb, 0x2e, 0xa0, 0x4e, 0xfc, 0x3e, 0x62, 0x5f, 0xcd, 0x9a, + 0xb1, 0xaf, 0xca, 0x1d, 0x71, 0xaf, 0xfe, 0xcc, 0x82, 0xf1, 0x5a, 0xe0, 0xd2, 0xad, 0xfb, 0x8d, + 0xb4, 0x4f, 0xf5, 0xf8, 0xa7, 0x43, 0x5d, 0xe2, 0x9f, 0x3e, 0x0e, 0x83, 0xb5, 0xc0, 0xad, 0xd6, + 0xba, 0xf9, 0x36, 0xdb, 0x7f, 0xdb, 0x82, 0xe1, 0x5a, 0xe0, 0x9e, 0x80, 0x70, 0xfe, 0x35, 0x53, + 0x38, 0xff, 0x48, 0xce, 0xba, 0xc9, 0x91, 0xc7, 0xff, 0xcd, 0x01, 0x18, 0xa3, 0xfd, 0x0c, 0x36, + 0xe5, 0x54, 0x1a, 0xc3, 0x66, 0xf5, 0x31, 0x6c, 0x94, 0x17, 0x0e, 0x9a, 0xcd, 0x60, 0x2f, 0x3d, + 0xad, 0xcb, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x4a, 0xad, 0x90, 0xec, 0x7a, 0x81, 0x60, 0x32, + 0x35, 0x55, 0x47, 0x4d, 0x94, 0x63, 0x85, 0x41, 0x1f, 0x67, 0x91, 0xe7, 0x37, 0x48, 0x9d, 0x34, + 0x02, 0xdf, 0xe5, 0xf2, 0xeb, 0xa2, 0x48, 0x1b, 0xa0, 0x95, 0x63, 0x03, 0x0b, 0xdd, 0x85, 0x32, + 0xfb, 0xcf, 0x8e, 0x9d, 0xa3, 0x67, 0x93, 0x14, 0xd9, 0xc5, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0xcf, + 0x01, 0xc4, 0x32, 0x42, 0x76, 0x24, 0xe2, 0x1c, 0x29, 0x86, 0x5c, 0xc5, 0xce, 0x8e, 0xb0, 0x86, + 
0x85, 0x9e, 0x82, 0x72, 0xec, 0x78, 0xcd, 0x9b, 0x9e, 0x4f, 0x22, 0x26, 0x97, 0x2e, 0xca, 0x24, + 0x5f, 0xa2, 0x10, 0x27, 0x70, 0xca, 0x10, 0xb1, 0x20, 0x00, 0x3c, 0x17, 0x6d, 0x89, 0x61, 0x33, + 0x86, 0xe8, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0xda, 0x82, 0xf3, 0x9e, 0xcf, 0x42, 0xec, 0x93, 0xfa, + 0xb6, 0xd7, 0x5a, 0xbb, 0x59, 0xbf, 0x43, 0x42, 0x6f, 0x63, 0x7f, 0xc1, 0x69, 0x6c, 0x13, 0x5f, + 0xe6, 0x09, 0xfc, 0xa8, 0xe8, 0xe2, 0xf9, 0x6a, 0x17, 0x5c, 0xdc, 0x95, 0x92, 0xfd, 0x32, 0x9c, + 0xa9, 0x05, 0x6e, 0x2d, 0x08, 0xe3, 0xe5, 0x20, 0xdc, 0x73, 0x42, 0x57, 0xae, 0x94, 0x59, 0x99, + 0x85, 0x84, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30, 0x72, 0x61, 0x3d, 0xcf, 0x98, 0xaf, 0x23, 0x3a, + 0xa3, 0x34, 0x18, 0x1b, 0xa0, 0xf2, 0x4d, 0x5c, 0x73, 0x62, 0x82, 0x6e, 0xb1, 0xa4, 0xb8, 0xc9, + 0x8d, 0x28, 0xaa, 0x3f, 0xa9, 0x25, 0xc5, 0x4d, 0x80, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xd9, + 0x01, 0x76, 0x38, 0xa6, 0x72, 0x16, 0xa0, 0xcf, 0xc1, 0x78, 0x44, 0x6e, 0x7a, 0x7e, 0xfb, 0x9e, + 0x94, 0x09, 0x74, 0x71, 0x27, 0xaa, 0x2f, 0xe9, 0x98, 0x5c, 0xb2, 0x68, 0x96, 0xe1, 0x14, 0x35, + 0xb4, 0x03, 0xe3, 0x7b, 0x9e, 0xef, 0x06, 0x7b, 0x91, 0xa4, 0x5f, 0xca, 0x17, 0x30, 0xde, 0xe5, + 0x98, 0xa9, 0x3e, 0x1a, 0xcd, 0xdd, 0x35, 0x88, 0xe1, 0x14, 0x71, 0xba, 0x00, 0xc3, 0xb6, 0x3f, + 0x1f, 0xdd, 0x8e, 0x48, 0x28, 0xd2, 0x1b, 0xb3, 0x05, 0x88, 0x65, 0x21, 0x4e, 0xe0, 0x74, 0x01, + 0xb2, 0x3f, 0xd7, 0xc2, 0xa0, 0xcd, 0xe3, 0xd8, 0x8b, 0x05, 0x88, 0x55, 0x29, 0xd6, 0x30, 0xe8, + 0x06, 0x65, 0xff, 0x56, 0x03, 0x1f, 0x07, 0x41, 0x2c, 0xb7, 0x34, 0x4b, 0xa8, 0xa9, 0x95, 0x63, + 0x03, 0x0b, 0x2d, 0x03, 0x8a, 0xda, 0xad, 0x56, 0x93, 0xd9, 0x29, 0x38, 0x4d, 0x46, 0x8a, 0xeb, + 0x88, 0x8b, 0x3c, 0x4a, 0x67, 0xbd, 0x03, 0x8a, 0x33, 0x6a, 0xd0, 0xb3, 0x7a, 0x43, 0x74, 0x75, + 0x90, 0x75, 0x95, 0x2b, 0x23, 0xea, 0xbc, 0x9f, 0x12, 0x86, 0x96, 0x60, 0x38, 0xda, 0x8f, 0x1a, + 0xb1, 0x08, 0x37, 0x96, 0x93, 0x96, 0xa6, 0xce, 0x50, 0xb4, 0xac, 0x68, 0xbc, 0x0a, 0x96, 0x75, + 0xed, 0x6f, 0x67, 0xac, 0x00, 0x4b, 0x86, 0x1b, 0xb7, 0x43, 0x82, 0x76, 0x60, 0xac, 0xc5, 0x56, + 0x98, 0x08, 0xcc, 0x2e, 0x96, 0xc9, 0x0b, 0x7d, 0xbe, 0xe9, 0xf7, 0xe8, 0x09, 0xaa, 0x64, 0x6e, + 0xec, 0xb1, 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xd5, 0xb3, 0xec, 0x32, 0xa9, 0xf3, 0x87, + 0xfa, 0xb0, 0x30, 0xac, 0x16, 0xaf, 0x92, 0x99, 0x7c, 0x89, 0x51, 0xf2, 0x45, 0xc2, 0x38, 0x1b, + 0xcb, 0xba, 0xe8, 0xb3, 0x30, 0x4e, 0x99, 0x7c, 0x2d, 0x31, 0xc5, 0xe9, 0x7c, 0x07, 0xf8, 0x24, + 0x1f, 0x85, 0x96, 0xb4, 0x41, 0xaf, 0x8c, 0x53, 0xc4, 0xd0, 0x9b, 0xcc, 0x04, 0xc0, 0xcc, 0x79, + 0xd1, 0x83, 0xb4, 0xae, 0xed, 0x97, 0x64, 0x35, 0x22, 0x79, 0xf9, 0x34, 0xec, 0x87, 0x9b, 0x4f, + 0x03, 0xdd, 0x84, 0x31, 0x91, 0x11, 0x56, 0x08, 0x3a, 0x8b, 0x86, 0x20, 0x6b, 0x0c, 0xeb, 0xc0, + 0xc3, 0x74, 0x01, 0x36, 0x2b, 0xa3, 0x4d, 0xb8, 0xa0, 0x25, 0x75, 0xb9, 0x16, 0x3a, 0x4c, 0x1b, + 0xed, 0xb1, 0x93, 0x48, 0xbb, 0xe6, 0x1e, 0xbb, 0x7f, 0x30, 0x7b, 0x61, 0xad, 0x1b, 0x22, 0xee, + 0x4e, 0x07, 0xdd, 0x82, 0x33, 0xdc, 0x7d, 0xb3, 0x42, 0x1c, 0xb7, 0xe9, 0xf9, 0xea, 0x1e, 0xe5, + 0xbb, 0xe5, 0xdc, 0xfd, 0x83, 0xd9, 0x33, 0xf3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x0d, 0xca, + 0xae, 0x1f, 0x89, 0x31, 0x18, 0x32, 0xf2, 0xe6, 0x94, 0x2b, 0xab, 0x75, 0xf5, 0xfd, 0xc9, 0x1f, + 0x9c, 0x54, 0x40, 0x9b, 0x5c, 0xd8, 0xa9, 0x64, 0x0b, 0xc3, 0x1d, 0x81, 0x67, 0xd2, 0x52, 0x2a, + 0xc3, 0x81, 0x8b, 0x4b, 0xf9, 0x95, 0x5d, 0xb3, 0xe1, 0xdb, 0x65, 0x10, 0x46, 0x6f, 0x00, 0xa2, + 0xcc, 0xb7, 0xd7, 0x20, 0xf3, 0x0d, 0x16, 0xf5, 0x9f, 0xc9, 0x86, 0x4b, 0xa6, 0x4b, 0x51, 0xbd, + 0x03, 0x03, 0x67, 0xd4, 
0x42, 0xd7, 0xe9, 0x6d, 0xa0, 0x97, 0x0a, 0xfb, 0x6c, 0x95, 0xe5, 0xac, + 0x42, 0x5a, 0x21, 0x69, 0x38, 0x31, 0x71, 0x4d, 0x8a, 0x38, 0x55, 0x0f, 0xb9, 0x70, 0xde, 0x69, + 0xc7, 0x01, 0x93, 0x23, 0x9b, 0xa8, 0x6b, 0xc1, 0x36, 0xf1, 0x99, 0x0a, 0xa7, 0xb4, 0x70, 0x89, + 0x5e, 0xd4, 0xf3, 0x5d, 0xf0, 0x70, 0x57, 0x2a, 0x94, 0xc1, 0x52, 0x39, 0x4a, 0xc1, 0x8c, 0xa7, + 0x93, 0x91, 0xa7, 0xf4, 0x45, 0x18, 0xd9, 0x0a, 0xa2, 0x78, 0x95, 0xc4, 0x7b, 0x41, 0xb8, 0x2d, + 0xa2, 0x22, 0x26, 0x91, 0x74, 0x13, 0x10, 0xd6, 0xf1, 0xe8, 0x0b, 0x8a, 0x19, 0x18, 0x54, 0x2b, + 0x4c, 0xb7, 0x5b, 0x4a, 0xce, 0x98, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x16, 0x99, + 0x9e, 0x36, 0x85, 0x5a, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x72, 0x42, 0x52, 0x0b, + 0x83, 0x06, 0x89, 0xb4, 0xf8, 0xcd, 0x8f, 0xf2, 0x98, 0x8f, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, + 0x76, 0x3d, 0x44, 0x3a, 0x13, 0x1a, 0x8d, 0xe7, 0x0b, 0xd8, 0x3b, 0x59, 0x81, 0x3e, 0x73, 0x1a, + 0xf9, 0x30, 0xa9, 0x52, 0x29, 0xf1, 0x28, 0x8f, 0xd1, 0xf4, 0x04, 0x5b, 0xdb, 0xfd, 0x87, 0x88, + 0x54, 0x2a, 0x8b, 0x6a, 0x8a, 0x12, 0xee, 0xa0, 0x6d, 0x84, 0x4c, 0x9a, 0xec, 0x99, 0xb4, 0xf6, + 0x2a, 0x94, 0xa3, 0xf6, 0xba, 0x1b, 0xec, 0x38, 0x9e, 0xcf, 0xf4, 0xb4, 0x1a, 0x2b, 0x5f, 0x97, + 0x00, 0x9c, 0xe0, 0xa0, 0x65, 0x28, 0x39, 0x52, 0x1f, 0x81, 0xf2, 0x23, 0x6d, 0x28, 0x2d, 0x04, + 0x77, 0x3e, 0x97, 0x1a, 0x08, 0x55, 0x17, 0xbd, 0x0a, 0x63, 0xc2, 0xfd, 0x50, 0x64, 0xf1, 0x3b, + 0x65, 0xfa, 0x88, 0xd4, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18, 0x89, 0x83, 0x26, 0x73, 0x74, + 0xa0, 0x1c, 0xd2, 0xd9, 0xfc, 0x68, 0x5d, 0x6b, 0x0a, 0x4d, 0x17, 0x05, 0xaa, 0xaa, 0x58, 0xa7, + 0x83, 0xd6, 0xf8, 0x7a, 0x67, 0x71, 0x8c, 0x49, 0x34, 0xfd, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0x1d, + 0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0x5a, 0xa1, 0x17, 0xb0, 0x35, 0xa1, + 0x54, 0x51, 0xd3, 0x66, 0xf6, 0x95, 0x5a, 0x1a, 0x01, 0x77, 0xd6, 0x61, 0xde, 0xa3, 0xa2, 0x70, + 0xfa, 0x1c, 0xcf, 0xda, 0xcb, 0x5f, 0x46, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x61, 0x27, 0x31, 0x7f, + 0xd4, 0x4f, 0xcf, 0xe4, 0x07, 0xf7, 0xd0, 0x1f, 0xff, 0x9c, 0xef, 0x53, 0x7f, 0x71, 0x42, 0x01, + 0xb9, 0x5a, 0x46, 0x38, 0xca, 0x6c, 0x47, 0xd3, 0xe7, 0xbb, 0x58, 0x79, 0xa5, 0x38, 0xf3, 0x84, + 0x21, 0x30, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x05, 0x26, 0x45, 0x30, 0xb1, 0x64, 0x98, 0x2e, + 0x24, 0xe6, 0xa3, 0x38, 0x05, 0xc3, 0x1d, 0xd8, 0x3c, 0xbe, 0xbb, 0xb3, 0xde, 0x24, 0xe2, 0xe8, + 0xbb, 0xe9, 0xf9, 0xdb, 0xd1, 0xf4, 0x45, 0x76, 0x3e, 0x88, 0xf8, 0xee, 0x69, 0x28, 0xce, 0xa8, + 0x81, 0xd6, 0x60, 0xb2, 0x15, 0x12, 0xb2, 0xc3, 0x78, 0x64, 0x71, 0x9f, 0xcd, 0x72, 0xe7, 0x69, + 0xda, 0x93, 0x5a, 0x0a, 0x76, 0x98, 0x51, 0x86, 0x3b, 0x28, 0xa0, 0x3d, 0x28, 0x05, 0xbb, 0x24, + 0xdc, 0x22, 0x8e, 0x3b, 0x7d, 0xa9, 0x8b, 0x39, 0xb3, 0xb8, 0xdc, 0x6e, 0x09, 0xdc, 0x94, 0xfa, + 0x5a, 0x16, 0xf7, 0x56, 0x5f, 0xcb, 0xc6, 0xd0, 0x0f, 0x5a, 0x70, 0x4e, 0x4a, 0xbc, 0xeb, 0x2d, + 0x3a, 0xea, 0x8b, 0x81, 0x1f, 0xc5, 0x21, 0x77, 0xf7, 0x7d, 0x2c, 0xdf, 0x05, 0x76, 0x2d, 0xa7, + 0x92, 0x92, 0x2b, 0x9e, 0xcb, 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x33, 0xdf, 0x0c, 0x53, 0x1d, 0x37, + 0xf7, 0x51, 0x52, 0x4e, 0xcc, 0x6c, 0xc3, 0x98, 0x31, 0x3a, 0x0f, 0x55, 0x73, 0xf9, 0x2f, 0x87, + 0xa1, 0xac, 0xb4, 0x5a, 0xe8, 0xaa, 0xa9, 0xac, 0x3c, 0x97, 0x56, 0x56, 0x96, 0xe8, 0x6b, 0x56, + 0xd7, 0x4f, 0xae, 0x65, 0x04, 0x57, 0xca, 0xdb, 0x8b, 0xfd, 0x7b, 0xcd, 0x6a, 0x42, 0xca, 0x62, + 0xdf, 0x5a, 0xcf, 0x81, 0xae, 0x72, 0xcf, 0x6b, 0x30, 0xe5, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4, + 0x05, 0xd8, 0x95, 0x5f, 0xd6, 0xa3, 0x15, 0xa4, 
0x10, 0x70, 0x67, 0x1d, 0xda, 0x20, 0xbf, 0xb3, + 0xd3, 0x82, 0x56, 0x7e, 0xa5, 0x63, 0x01, 0x45, 0x8f, 0xc3, 0x60, 0x2b, 0x70, 0xab, 0x35, 0xc1, + 0x2a, 0x6a, 0xe9, 0x47, 0xdd, 0x6a, 0x0d, 0x73, 0x18, 0x9a, 0x87, 0x21, 0xf6, 0x23, 0x9a, 0x1e, + 0xcd, 0x77, 0x4b, 0x67, 0x35, 0xb4, 0x84, 0x1e, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc0, 0x87, 0xf2, + 0xd7, 0x4c, 0xe0, 0x33, 0xfc, 0x80, 0x02, 0x1f, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63, + 0xbc, 0x69, 0xf8, 0x12, 0x21, 0x91, 0x70, 0x8d, 0x7d, 0xbc, 0xeb, 0x63, 0x46, 0x68, 0x49, 0x2f, + 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0x35, 0x3a, 0x5a, 0x2d, + 0xf5, 0xdf, 0xaa, 0x9a, 0xd0, 0xce, 0x16, 0x3b, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x6e, 0x10, 0xb1, + 0x63, 0x56, 0xb0, 0xb7, 0xd2, 0xaf, 0xb2, 0xf4, 0xe6, 0xad, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1d, + 0xa9, 0x05, 0xae, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0xbd, 0x16, 0xcc, 0x74, 0x3e, 0x9a, 0x54, 0xa7, + 0xc7, 0xfa, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x59, 0xca, 0x25, 0x87, 0xbb, 0x34, 0x65, 0x7f, 0x99, + 0x6b, 0x34, 0x85, 0xde, 0x83, 0x44, 0xed, 0xe6, 0x49, 0x24, 0x40, 0x5c, 0x32, 0x54, 0x32, 0x0f, + 0xac, 0x35, 0xff, 0x35, 0x8b, 0x69, 0xcd, 0xd7, 0xc8, 0x4e, 0xab, 0xe9, 0xc4, 0x27, 0xe1, 0x96, + 0xf7, 0x26, 0x94, 0x62, 0xd1, 0x5a, 0xb7, 0x9c, 0x8d, 0x5a, 0xa7, 0x98, 0xe5, 0x80, 0x62, 0x36, + 0x65, 0x29, 0x56, 0x64, 0xec, 0x7f, 0xc8, 0x67, 0x40, 0x42, 0x4e, 0x40, 0xf2, 0x5d, 0x31, 0x25, + 0xdf, 0xb3, 0x3d, 0xbe, 0x20, 0x47, 0x02, 0xfe, 0x0f, 0xcc, 0x7e, 0x33, 0x21, 0xcb, 0x07, 0xdd, + 0x5c, 0xc3, 0xfe, 0x61, 0x0b, 0x4e, 0x67, 0xd9, 0x37, 0xd2, 0x07, 0x02, 0x17, 0xf1, 0x28, 0xf3, + 0x15, 0x35, 0x82, 0x77, 0x44, 0x39, 0x56, 0x18, 0x7d, 0xa7, 0x43, 0x3a, 0x5a, 0x78, 0xd0, 0x5b, + 0x30, 0x56, 0x0b, 0x89, 0x76, 0xa1, 0xbd, 0xce, 0xfd, 0x6c, 0x79, 0x7f, 0x9e, 0x3e, 0xb2, 0x8f, + 0xad, 0xfd, 0x33, 0x05, 0x38, 0xcd, 0xf5, 0xcf, 0xf3, 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x57, 0xa4, + 0xb2, 0x7a, 0x0b, 0x46, 0x5b, 0x9a, 0x5c, 0xae, 0x5b, 0xa8, 0x3b, 0x5d, 0x7e, 0x97, 0x48, 0x12, + 0xf4, 0x52, 0x6c, 0xd0, 0x42, 0x2e, 0x8c, 0x92, 0x5d, 0xaf, 0xa1, 0x94, 0x98, 0x85, 0x23, 0x5f, + 0x2e, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x84, 0xec, 0xa6, 0xf6, 0x8f, 0x58, 0xf0, + 0x48, 0x4e, 0x60, 0x3c, 0xda, 0xdc, 0x1e, 0xd3, 0xf4, 0x8b, 0x44, 0x89, 0xaa, 0x39, 0xae, 0xff, + 0xc7, 0x02, 0x8a, 0x3e, 0x0d, 0xc0, 0xf5, 0xf7, 0xf4, 0x85, 0xda, 0x2b, 0x82, 0x98, 0x11, 0xfc, + 0x48, 0x8b, 0x63, 0x23, 0xeb, 0x63, 0x8d, 0x96, 0xfd, 0x93, 0x45, 0x18, 0xe4, 0x29, 0x9e, 0x97, + 0x61, 0x78, 0x8b, 0x07, 0xf8, 0xef, 0x27, 0x97, 0x40, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46, + 0x2b, 0x70, 0x8a, 0x27, 0x48, 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0x5c, 0x50, + 0x09, 0xfc, 0xaa, 0x9d, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x1d, 0xc6, 0xe9, 0xc3, 0x23, 0x68, 0xc7, + 0x92, 0x12, 0x4f, 0x8d, 0xa0, 0x5e, 0x3a, 0x6b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xd5, + 0x21, 0xd2, 0x1b, 0x4c, 0xde, 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0x86, 0x8d, 0x6d, 0x66, 0xc6, + 0xb9, 0xb6, 0x15, 0x92, 0x68, 0x2b, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd4, 0x0c, 0x1b, 0x53, 0x70, + 0xdc, 0x51, 0x83, 0x52, 0xd9, 0x70, 0xbc, 0x66, 0x3b, 0x24, 0x09, 0x95, 0x21, 0x93, 0xca, 0x72, + 0x0a, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0xd1, 0x3e, 0x94, + 0xb5, 0xea, 0xb0, 0x74, 0x4c, 0xec, 0x12, 0x17, 0x4b, 0xd8, 0xf3, 0x71, 0x0a, 0x86, 0xaa, 0xba, + 0x2e, 0x5c, 0x12, 0x25, 0x15, 0xf4, 0x2c, 0x8c, 0x88, 0xb0, 0xf7, 0xcc, 0xa8, 0x92, 0x4f, 0x1d, + 0x53, 0xad, 0x57, 0x92, 0x62, 0xac, 0xe3, 0xd8, 0xdf, 0x57, 0x80, 0x53, 
0x19, 0x56, 0xf1, 0xfc, + 0xa8, 0xda, 0xf4, 0xa2, 0x58, 0x25, 0x50, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, + 0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, 0xea, 0x54, 0x40, 0x8f, 0x98, 0x8a, 0xec, 0x12, 0x0c, 0xb4, + 0x23, 0x22, 0x23, 0xda, 0xa9, 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0x9b, 0x4a, 0x79, + 0xa1, 0xb1, 0xc7, 0x5c, 0x7d, 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74, + 0x12, 0x9a, 0x89, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0x7e, 0x32, 0xb4, 0xeb, + 0x3b, 0x81, 0xef, 0xc5, 0x81, 0xb2, 0x59, 0xe0, 0xe1, 0x98, 0x48, 0x6b, 0x6b, 0x45, 0x94, 0x63, + 0x85, 0x81, 0x2e, 0xc3, 0x20, 0x13, 0x3a, 0x75, 0xa4, 0x92, 0x5b, 0xa8, 0xf0, 0xf8, 0x1c, 0x1c, + 0xdc, 0x77, 0x9a, 0xce, 0xc7, 0x61, 0xa0, 0x15, 0x04, 0xcd, 0xf4, 0xa1, 0x45, 0xbb, 0x1b, 0x04, + 0x4d, 0xcc, 0x80, 0xe8, 0x63, 0x62, 0xbc, 0x52, 0x4a, 0x7a, 0xec, 0xb8, 0x41, 0xa4, 0x0d, 0xda, + 0x93, 0x30, 0xbc, 0x4d, 0xf6, 0x43, 0xcf, 0xdf, 0x4c, 0x1b, 0x6f, 0xdc, 0xe0, 0xc5, 0x58, 0xc2, + 0xcd, 0xac, 0x40, 0xc3, 0xc7, 0x9d, 0x5f, 0xb3, 0xd4, 0xf3, 0x0a, 0xfc, 0xfe, 0x22, 0x4c, 0xe0, + 0x85, 0xca, 0x87, 0x13, 0x71, 0xbb, 0x73, 0x22, 0x8e, 0x3b, 0xbf, 0x66, 0xef, 0xd9, 0xf8, 0x45, + 0x0b, 0x26, 0x58, 0xf0, 0x7d, 0x11, 0xc8, 0xc7, 0x0b, 0xfc, 0x13, 0x60, 0xf1, 0x1e, 0x87, 0xc1, + 0x90, 0x36, 0x9a, 0xce, 0x21, 0xc7, 0x7a, 0x82, 0x39, 0x0c, 0x9d, 0x87, 0x01, 0xd6, 0x05, 0x3a, + 0x79, 0xa3, 0x3c, 0xfd, 0x4e, 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0x74, 0x0a, 0x4c, 0x5a, 0x4d, + 0x8f, 0x77, 0x3a, 0x51, 0x09, 0x7e, 0x30, 0xa2, 0x53, 0x64, 0x76, 0xed, 0xfd, 0x45, 0xa7, 0xc8, + 0x26, 0xd9, 0xfd, 0xf9, 0xf4, 0x87, 0x05, 0xb8, 0x98, 0x59, 0xaf, 0xef, 0xe8, 0x14, 0xdd, 0x6b, + 0x3f, 0xcc, 0x20, 0xed, 0xc5, 0x13, 0x34, 0x8d, 0x1b, 0xe8, 0x97, 0xc3, 0x1c, 0xec, 0x23, 0x68, + 0x44, 0xe6, 0x90, 0x7d, 0x40, 0x82, 0x46, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c, + 0x6f, 0x61, 0x0f, 0xc1, 0x2b, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xa3, 0xfc, 0x8c, 0xe1, + 0x65, 0x58, 0x41, 0xd1, 0x3c, 0x4c, 0xec, 0x78, 0x3e, 0x3d, 0x7c, 0xf6, 0x4d, 0xc6, 0x4f, 0xc5, + 0xf4, 0x59, 0x31, 0xc1, 0x38, 0x8d, 0x8f, 0x3c, 0x2d, 0xa0, 0x44, 0x21, 0x3f, 0x2b, 0x73, 0x6e, + 0x6f, 0xe7, 0x4c, 0x75, 0xa9, 0x1a, 0xc5, 0x8c, 0xe0, 0x12, 0x2b, 0xda, 0xfb, 0xbf, 0xd8, 0xff, + 0xfb, 0x7f, 0x34, 0xfb, 0xed, 0x3f, 0xf3, 0x2a, 0x8c, 0x3d, 0xb0, 0xc0, 0xd7, 0xfe, 0x6a, 0x11, + 0x1e, 0xed, 0xb2, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, 0xd6, 0x77, 0xcc, 0x43, 0x0d, 0x4e, + 0x6f, 0xb4, 0x9b, 0xcd, 0x7d, 0x66, 0x7d, 0x4e, 0x5c, 0x89, 0x21, 0x78, 0xca, 0xf3, 0x32, 0xe1, + 0xd1, 0x72, 0x06, 0x0e, 0xce, 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86, + 0x1e, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x6b, 0x30, 0xe5, 0xec, 0x3a, 0x1e, 0x8f, 0xca, 0x29, 0x09, + 0x70, 0x8e, 0x5e, 0xc9, 0xe9, 0xe6, 0xd3, 0x08, 0xb8, 0xb3, 0x0e, 0x7a, 0x03, 0x50, 0x20, 0xb2, + 0xca, 0x5f, 0x23, 0xbe, 0xd0, 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xad, 0x0e, 0x0c, 0x9c, + 0x51, 0x2b, 0x15, 0xa0, 0x61, 0x28, 0x3f, 0x40, 0x43, 0xf7, 0x73, 0xb1, 0x67, 0x7e, 0x80, 0xff, + 0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, 0x66, 0x9c, 0xb1, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69, + 0xb1, 0x12, 0xce, 0x68, 0x06, 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0x8b, 0x9e, + 0xc1, 0xe2, 0x8b, 0x60, 0x28, 0x0a, 0x03, 0x7d, 0x06, 0x86, 0x5d, 0x6f, 0xd7, 0x8b, 0x82, 0x50, + 0xac, 0xf4, 0x23, 0xaa, 0x0b, 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xfe, 0x02, + 0x8c, 0xc9, 0x16, 0xdf, 0x6c, 0x07, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x35, 0xe3, 0x5a, 0xfe, 0x58, + 
0xb7, 0x88, 0x30, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xad, 0xd4, 0x75, 0xfc, 0x44, 0x6f, 0x52, 0xdd, + 0xaf, 0xe1, 0x7f, 0x64, 0xc1, 0x94, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xb2, 0x79, 0x1b, 0x3c, 0xd6, + 0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2e, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x61, 0x60, 0xcb, + 0x09, 0xdd, 0x6e, 0x11, 0xb0, 0x3b, 0x2a, 0xcd, 0x5d, 0x77, 0x42, 0xa1, 0xd6, 0x7b, 0x5a, 0x25, + 0x45, 0x76, 0xc2, 0xde, 0x2a, 0x3d, 0xd6, 0x14, 0x7a, 0x19, 0x86, 0xa2, 0x46, 0xd0, 0x52, 0xf6, + 0xe2, 0x97, 0x78, 0xc2, 0x64, 0x5a, 0x72, 0x78, 0x30, 0x8b, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, + 0xe8, 0x2d, 0x18, 0x63, 0xbf, 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x5b, 0x4e, 0x5d, 0x47, 0xe4, 0x06, + 0x68, 0x46, 0x11, 0x36, 0x49, 0xcd, 0x6c, 0x42, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 0xfd, 0xdb, + 0x22, 0x9c, 0xca, 0x58, 0x73, 0x28, 0x32, 0x66, 0xe2, 0xd9, 0x3e, 0x97, 0xea, 0xfb, 0x9c, 0x8b, + 0x88, 0xbd, 0x86, 0x5c, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x76, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xde, + 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08, + 0xa7, 0xb3, 0x82, 0x54, 0xa1, 0x6f, 0x4b, 0x65, 0x4e, 0x7b, 0xa1, 0xdf, 0xf0, 0x56, 0x3c, 0x9d, + 0x1a, 0x97, 0x01, 0x2f, 0xcc, 0x99, 0xb9, 0xd4, 0x7a, 0x0e, 0xb3, 0x68, 0x93, 0xb9, 0x9f, 0x87, + 0x3c, 0xe3, 0x9d, 0x3c, 0x3e, 0x3e, 0xd9, 0x77, 0x07, 0x44, 0xaa, 0xbc, 0x28, 0xa5, 0xbf, 0x97, + 0xc5, 0xbd, 0xf5, 0xf7, 0xb2, 0xe5, 0x19, 0x0f, 0x46, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4d, + 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59, + 0xb9, 0x62, 0xb1, 0x4b, 0x30, 0x10, 0x06, 0x4d, 0x92, 0x4e, 0x54, 0x86, 0x83, 0x26, 0xc1, 0x0c, + 0x42, 0x31, 0xe2, 0x44, 0xd8, 0x31, 0xaa, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x71, 0x18, 0x6c, 0x92, + 0x5d, 0xd2, 0x4c, 0xe7, 0x93, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x38, 0x00, 0x17, 0xba, + 0x06, 0x70, 0xa0, 0xcf, 0xa1, 0x4d, 0x27, 0x26, 0x7b, 0xce, 0x7e, 0x3a, 0xf0, 0xfb, 0x35, 0x5e, + 0x8c, 0x25, 0x9c, 0xf9, 0xab, 0xf0, 0xf8, 0xad, 0x29, 0x21, 0xa2, 0x08, 0xdb, 0x2a, 0xa0, 0xa6, + 0x50, 0xaa, 0x78, 0x1c, 0x42, 0xa9, 0xe7, 0x00, 0xa2, 0xa8, 0xc9, 0x0d, 0x5f, 0x5c, 0xe1, 0x08, + 0x93, 0xc4, 0xf9, 0xad, 0xdf, 0x14, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb6, 0xc2, 0x20, 0xe6, + 0x32, 0xd9, 0x0a, 0xb7, 0x0d, 0x1b, 0x34, 0x7d, 0xe7, 0x6b, 0x29, 0x38, 0xee, 0xa8, 0x81, 0x5e, + 0x84, 0x11, 0xe1, 0x4f, 0x5f, 0x0b, 0x82, 0xa6, 0x10, 0x03, 0x29, 0x73, 0xa9, 0x7a, 0x02, 0xc2, + 0x3a, 0x9e, 0x56, 0x8d, 0x09, 0x7a, 0x87, 0x33, 0xab, 0x71, 0x61, 0xaf, 0x86, 0x97, 0x0a, 0x58, + 0x57, 0xea, 0x2b, 0x60, 0x5d, 0x22, 0x18, 0x2b, 0xf7, 0xad, 0xdb, 0x82, 0x9e, 0xa2, 0xa4, 0x9f, + 0x1b, 0x80, 0x53, 0x62, 0xe1, 0x3c, 0xec, 0xe5, 0x72, 0xbb, 0x73, 0xb9, 0x1c, 0x87, 0xe8, 0xec, + 0xc3, 0x35, 0x73, 0xd2, 0x6b, 0xe6, 0x07, 0x2c, 0x30, 0xd9, 0x2b, 0xf4, 0x7f, 0xe7, 0x66, 0xce, + 0x78, 0x31, 0x97, 0x5d, 0x73, 0xe5, 0x05, 0xf2, 0x3e, 0x73, 0x68, 0xd8, 0xff, 0xc1, 0x82, 0xc7, + 0x7a, 0x52, 0x44, 0x4b, 0x50, 0x66, 0x3c, 0xa0, 0xf6, 0x3a, 0x7b, 0x42, 0xd9, 0x8e, 0x4a, 0x40, + 0x0e, 0x4b, 0x9a, 0xd4, 0x44, 0x4b, 0x1d, 0x29, 0x4a, 0x9e, 0xcc, 0x48, 0x51, 0x72, 0xc6, 0x18, + 0x9e, 0x07, 0xcc, 0x51, 0xf2, 0xe5, 0x22, 0x0c, 0xf1, 0x15, 0x7f, 0x02, 0xcf, 0xb0, 0x65, 0x21, + 0xb7, 0xed, 0x12, 0x11, 0x8f, 0xf7, 0x65, 0xae, 0xe2, 0xc4, 0x0e, 0x67, 0x13, 0xd4, 0x6d, 0x95, + 0x48, 0x78, 0xd1, 0xe7, 0x00, 0xa2, 0x38, 0xf4, 0xfc, 0x4d, 0x5a, 0x26, 0x62, 0x25, 0x7e, 0xbc, + 0x0b, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x93, 0x9d, 0xab, 0x00, 0x58, 0xa3, 0x88, 0xe6, 0x8c, 0xfb, + 0x72, 0x26, 0x25, 0xf8, 
0x04, 0x4e, 0x35, 0xb9, 0x3d, 0x67, 0x5e, 0x82, 0xb2, 0x22, 0xde, 0x4b, + 0x8a, 0x33, 0xaa, 0x33, 0x17, 0x9f, 0x82, 0x89, 0x54, 0xdf, 0x8e, 0x24, 0x04, 0xfa, 0x25, 0x0b, + 0x26, 0x78, 0x67, 0x96, 0xfc, 0x5d, 0x71, 0xa6, 0xbe, 0x07, 0xa7, 0x9b, 0x19, 0x67, 0x9b, 0x98, + 0xd1, 0xfe, 0xcf, 0x42, 0x25, 0xf4, 0xc9, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x15, 0xba, 0x6e, 0xe9, + 0xd9, 0xe5, 0x34, 0x85, 0x5b, 0xe3, 0x28, 0x5f, 0xb3, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0xdb, 0x16, + 0x4c, 0xf1, 0x9e, 0xdf, 0x20, 0xfb, 0x6a, 0x87, 0x7f, 0x3d, 0xfb, 0x2e, 0xb2, 0x06, 0x15, 0x72, + 0xb2, 0x06, 0xe9, 0x9f, 0x56, 0xec, 0xfa, 0x69, 0x3f, 0x63, 0x81, 0x58, 0x21, 0x27, 0xf0, 0x94, + 0xff, 0x66, 0xf3, 0x29, 0x3f, 0x93, 0xbf, 0x09, 0x72, 0xde, 0xf0, 0x7f, 0x66, 0xc1, 0x24, 0x47, + 0x48, 0x74, 0xce, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0xb7, 0xe8, 0x0d, 0xb2, 0xbf, 0x16, 0xd4, 0x9c, + 0x78, 0x2b, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, 0xae, 0xdc, 0x40, 0x47, 0x48, 0x58, + 0x7c, 0xe4, 0xa0, 0xfa, 0xf6, 0xd7, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0xfd, 0xa1, 0x4c, 0x05, 0x2b, + 0xd5, 0xae, 0x8b, 0xe4, 0x68, 0x52, 0x10, 0xac, 0x61, 0x1d, 0xcb, 0xf0, 0xa4, 0x0c, 0x07, 0x8a, + 0xbd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0x7f, 0x30, 0x08, 0x69, 0x0f, 0x10, 0x74, 0x07, 0x46, 0x1b, + 0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0xa8, 0x9b, 0xc5, 0xd1, 0xa2, 0x86, 0x27, 0x54, + 0xbd, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x39, 0x80, 0x56, 0xe8, 0xed, 0x7a, 0x4d, 0xb2, 0xc9, 0x24, + 0x0e, 0xcc, 0x91, 0x9a, 0x9b, 0xd1, 0xc8, 0x52, 0xac, 0x61, 0x64, 0x78, 0xaa, 0x16, 0x1f, 0xb2, + 0xa7, 0x2a, 0x9c, 0x98, 0xa7, 0xea, 0xc0, 0x91, 0x3c, 0x55, 0x4b, 0x47, 0xf6, 0x54, 0x1d, 0xec, + 0xcb, 0x53, 0x15, 0xc3, 0x59, 0xc9, 0xc1, 0xd1, 0xff, 0xcb, 0x5e, 0x93, 0x08, 0xb6, 0x9d, 0x7b, + 0x7f, 0xcf, 0xdc, 0x3f, 0x98, 0x3d, 0x8b, 0x33, 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x69, 0x98, 0x76, + 0x9a, 0xcd, 0x60, 0x4f, 0x4d, 0xea, 0x52, 0xd4, 0x70, 0x9a, 0x5c, 0x94, 0x3f, 0xcc, 0xa8, 0x9e, + 0xbf, 0x7f, 0x30, 0x3b, 0x3d, 0x9f, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0xd7, 0xa0, 0xdc, 0x0a, 0x83, + 0xc6, 0x8a, 0xe6, 0xa6, 0x76, 0x91, 0x0e, 0x60, 0x4d, 0x16, 0x1e, 0x1e, 0xcc, 0x8e, 0xa9, 0x3f, + 0xec, 0xc2, 0x4f, 0x2a, 0xd8, 0xdb, 0x70, 0xaa, 0x4e, 0x42, 0x8f, 0xa5, 0x1f, 0x76, 0x93, 0xf3, + 0x63, 0x0d, 0xca, 0x61, 0xea, 0xc4, 0xec, 0x2b, 0x8a, 0x9c, 0x16, 0x7d, 0x5c, 0x9e, 0x90, 0x09, + 0x21, 0xfb, 0x7f, 0x5a, 0x30, 0x2c, 0x3c, 0x32, 0x4e, 0x80, 0x51, 0x9b, 0x37, 0xe4, 0xe5, 0xb3, + 0xd9, 0xb7, 0x0a, 0xeb, 0x4c, 0xae, 0xa4, 0xbc, 0x9a, 0x92, 0x94, 0x3f, 0xd6, 0x8d, 0x48, 0x77, + 0x19, 0xf9, 0x5f, 0x2b, 0xc2, 0xb8, 0xe9, 0xba, 0x77, 0x02, 0x43, 0xb0, 0x0a, 0xc3, 0x91, 0xf0, + 0x4d, 0x2b, 0xe4, 0x5b, 0x64, 0xa7, 0x27, 0x31, 0xb1, 0xd6, 0x12, 0xde, 0x68, 0x92, 0x48, 0xa6, + 0xd3, 0x5b, 0xf1, 0x21, 0x3a, 0xbd, 0xf5, 0xf2, 0x9e, 0x1c, 0x38, 0x0e, 0xef, 0x49, 0xfb, 0x2b, + 0xec, 0x66, 0xd3, 0xcb, 0x4f, 0x80, 0xe9, 0xb9, 0x66, 0xde, 0x81, 0x76, 0x97, 0x95, 0x25, 0x3a, + 0x95, 0xc3, 0xfc, 0xfc, 0x82, 0x05, 0x17, 0x32, 0xbe, 0x4a, 0xe3, 0x84, 0x9e, 0x86, 0x92, 0xd3, + 0x76, 0x3d, 0xb5, 0x97, 0x35, 0xad, 0xd9, 0xbc, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0xc2, 0x14, 0xb9, + 0xd7, 0xf2, 0xb8, 0xc2, 0x50, 0x37, 0xa9, 0x2c, 0xf2, 0xc8, 0xda, 0x4b, 0x69, 0x20, 0xee, 0xc4, + 0x57, 0xc1, 0x1e, 0x8a, 0xb9, 0xc1, 0x1e, 0xfe, 0xae, 0x05, 0x23, 0xca, 0x3b, 0xeb, 0xa1, 0x8f, + 0xf6, 0xb7, 0x98, 0xa3, 0xfd, 0x68, 0x97, 0xd1, 0xce, 0x19, 0xe6, 0xbf, 0x51, 0x50, 0xfd, 0xad, + 0x05, 0x61, 0xdc, 0x07, 0x87, 0xf5, 0x32, 0x94, 0x5a, 0x61, 0x10, 0x07, 0x8d, 0xa0, 0x29, 0x18, + 0xac, 0xf3, 0x49, 0xd4, 0x13, 0x5e, 0x7e, 0xa8, 
0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6, + 0x82, 0xa9, 0x49, 0x46, 0x2f, 0x08, 0x63, 0xcc, 0x20, 0xc8, 0x05, 0x88, 0x9d, 0x70, 0x93, 0xc4, + 0xb4, 0x4c, 0x44, 0x59, 0xca, 0x3f, 0x3c, 0xda, 0xb1, 0xd7, 0x9c, 0xf3, 0xfc, 0x38, 0x8a, 0xc3, + 0xb9, 0xaa, 0x1f, 0xdf, 0x0a, 0xf9, 0x7b, 0x4d, 0x0b, 0x63, 0xa2, 0x68, 0x61, 0x8d, 0xae, 0x74, + 0x2b, 0x66, 0x6d, 0x0c, 0x9a, 0xfa, 0xf7, 0x55, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x12, 0xbb, 0x4a, + 0xd8, 0x00, 0x1d, 0x2d, 0xee, 0xc7, 0x97, 0xcb, 0x6a, 0x68, 0x99, 0xf2, 0xad, 0xa2, 0x47, 0x17, + 0xe9, 0x7e, 0x72, 0xd3, 0x86, 0x75, 0x17, 0xa3, 0x24, 0x04, 0x09, 0xfa, 0xd6, 0x0e, 0x9b, 0x8a, + 0x67, 0x7a, 0x5c, 0x01, 0x47, 0xb0, 0xa2, 0x60, 0xd1, 0xfe, 0x59, 0x2c, 0xf4, 0x6a, 0x4d, 0x2c, + 0x72, 0x2d, 0xda, 0xbf, 0x00, 0xe0, 0x04, 0x07, 0x5d, 0x15, 0xaf, 0x71, 0x2e, 0x9a, 0x7e, 0x34, + 0xf5, 0x1a, 0x97, 0x9f, 0xaf, 0x09, 0xb3, 0x9f, 0x85, 0x11, 0x95, 0xeb, 0xb2, 0xc6, 0x53, 0x28, + 0x8a, 0x98, 0x53, 0x4b, 0x49, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x4c, 0x44, 0x5c, 0xd4, 0xa3, 0x42, + 0x8b, 0x72, 0x91, 0xd9, 0xc7, 0xa5, 0x21, 0x4a, 0xdd, 0x04, 0x1f, 0xb2, 0x22, 0x7e, 0x74, 0x48, + 0x57, 0xde, 0x34, 0x09, 0xf4, 0x3a, 0x8c, 0x37, 0x03, 0xc7, 0x5d, 0x70, 0x9a, 0x8e, 0xdf, 0x60, + 0xdf, 0x5b, 0x32, 0x53, 0xa6, 0xdd, 0x34, 0xa0, 0x38, 0x85, 0x4d, 0x39, 0x1f, 0xbd, 0x44, 0x84, + 0xc3, 0x75, 0xfc, 0x4d, 0x12, 0x89, 0xcc, 0x85, 0x8c, 0xf3, 0xb9, 0x99, 0x83, 0x83, 0x73, 0x6b, + 0xa3, 0x97, 0x61, 0x54, 0x7e, 0xbe, 0xe6, 0xf9, 0x9e, 0xd8, 0xde, 0x6b, 0x30, 0x6c, 0x60, 0xa2, + 0x3d, 0x38, 0x23, 0xff, 0xaf, 0x85, 0xce, 0xc6, 0x86, 0xd7, 0x10, 0xee, 0xa0, 0xdc, 0x31, 0x6e, + 0x5e, 0x7a, 0x6f, 0x2d, 0x65, 0x21, 0x1d, 0x1e, 0xcc, 0x5e, 0x12, 0xa3, 0x96, 0x09, 0x67, 0x93, + 0x98, 0x4d, 0x1f, 0xad, 0xc0, 0xa9, 0x2d, 0xe2, 0x34, 0xe3, 0xad, 0xc5, 0x2d, 0xd2, 0xd8, 0x96, + 0x9b, 0x88, 0xf9, 0xd3, 0x6b, 0x16, 0xeb, 0xd7, 0x3b, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x36, 0x4c, + 0xb7, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0x5a, 0x0d, 0x62, 0x66, 0x8d, 0xa2, 0x52, 0x67, 0x0a, 0xc7, + 0x7b, 0x15, 0xb1, 0xa0, 0x96, 0x83, 0x87, 0x73, 0x29, 0xa0, 0xf7, 0xe0, 0x4c, 0x6a, 0x31, 0x08, + 0xd7, 0xe3, 0xf1, 0xfc, 0xe0, 0xe2, 0xf5, 0xac, 0x0a, 0xc2, 0x8b, 0x3f, 0x0b, 0x84, 0xb3, 0x9b, + 0x40, 0x2f, 0x40, 0xc9, 0x6b, 0x2d, 0x3b, 0x3b, 0x5e, 0x73, 0x9f, 0x45, 0x47, 0x2f, 0xb3, 0x88, + 0xe1, 0xa5, 0x6a, 0x8d, 0x97, 0x1d, 0x6a, 0xbf, 0xb1, 0xc2, 0xa4, 0xfc, 0xbe, 0x16, 0x03, 0x32, + 0x9a, 0x9e, 0x4c, 0x8c, 0x6d, 0xb5, 0x40, 0x91, 0x11, 0x36, 0xb0, 0xde, 0x9f, 0x0d, 0xd3, 0xbb, + 0xb4, 0xb2, 0xc6, 0x00, 0xa2, 0xcf, 0xc3, 0xa8, 0xbe, 0x62, 0xc5, 0x65, 0x76, 0x39, 0x9b, 0x3f, + 0xd2, 0x56, 0x36, 0x67, 0x1f, 0xd5, 0xea, 0xd5, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x4b, + 0x74, 0x13, 0x4a, 0x8d, 0xa6, 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x16, 0xbe, 0x68, 0x51, 0xe0, 0x88, + 0xc9, 0x11, 0x91, 0x9f, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x6c, 0x8f, 0x30, 0xe2, + 0x29, 0x51, 0xbb, 0xd5, 0x97, 0xa8, 0x7d, 0x5e, 0x26, 0x1d, 0x5d, 0x4d, 0xc9, 0x1f, 0x52, 0x09, + 0x45, 0x13, 0x29, 0x44, 0x1a, 0xbf, 0x6f, 0xd3, 0x67, 0x5d, 0x5a, 0x3f, 0xd0, 0xd3, 0x78, 0xdf, + 0xd0, 0xd2, 0x0d, 0xf6, 0xff, 0xe8, 0xc9, 0xd5, 0xb8, 0xd8, 0x5f, 0x29, 0xc0, 0x19, 0x35, 0x84, + 0xdf, 0xb8, 0x03, 0x77, 0xbb, 0x73, 0xe0, 0x8e, 0x41, 0x5f, 0x65, 0xdf, 0x82, 0x21, 0x1e, 0x8f, + 0xa9, 0x0f, 0x66, 0xeb, 0x71, 0x33, 0x74, 0xa1, 0x62, 0x09, 0x8c, 0xf0, 0x85, 0xdf, 0x6b, 0xc1, + 0xc4, 0xda, 0x62, 0xad, 0x1e, 0x34, 0xb6, 0x49, 0x3c, 0xcf, 0x99, 0x63, 0x2c, 0x78, 0x2d, 0xeb, + 0x01, 0x79, 0xa8, 0x2c, 0xee, 0xec, 0x12, 0x0c, 0x6c, 0x05, 0x51, 0x9c, 
0x56, 0x66, 0x5f, 0x0f, + 0xa2, 0x18, 0x33, 0x88, 0xfd, 0x3b, 0x16, 0x0c, 0xb2, 0x34, 0xdb, 0xbd, 0x12, 0xbd, 0xf7, 0xf3, + 0x5d, 0xe8, 0x45, 0x18, 0x22, 0x1b, 0x1b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0xde, 0xc7, 0x43, 0x4b, + 0xac, 0x94, 0x32, 0x18, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x17, 0xca, 0xb1, 0xb7, 0x43, + 0xe6, 0x5d, 0x57, 0xa8, 0x03, 0x1f, 0xc0, 0x83, 0x7a, 0x4d, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x4b, + 0x05, 0x80, 0x24, 0x1a, 0x47, 0xaf, 0x4f, 0x5c, 0xe8, 0x50, 0x14, 0x5d, 0xce, 0x50, 0x14, 0xa1, + 0x84, 0x60, 0x86, 0x96, 0x48, 0x0d, 0x53, 0xb1, 0xaf, 0x61, 0x1a, 0x38, 0xca, 0x30, 0x2d, 0xc2, + 0x54, 0x12, 0x4d, 0xc4, 0x0c, 0xa6, 0xc4, 0x1e, 0x44, 0x6b, 0x69, 0x20, 0xee, 0xc4, 0xb7, 0x09, + 0x5c, 0x52, 0x41, 0x15, 0xc4, 0x5d, 0xc3, 0xac, 0x4d, 0x8f, 0x90, 0xf3, 0x3f, 0xd1, 0x84, 0x15, + 0x72, 0x35, 0x61, 0x3f, 0x6e, 0xc1, 0xe9, 0x74, 0x3b, 0xcc, 0xfd, 0xef, 0x8b, 0x16, 0x9c, 0x61, + 0xfa, 0x40, 0xd6, 0x6a, 0xa7, 0xf6, 0xf1, 0x85, 0xae, 0x81, 0x22, 0x72, 0x7a, 0x9c, 0xb8, 0xb9, + 0xaf, 0x64, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xbe, 0x00, 0xd3, 0x79, 0x11, 0x26, 0x98, 0x31, + 0xba, 0x73, 0xaf, 0xbe, 0x4d, 0xf6, 0x84, 0xc9, 0x6f, 0x62, 0x8c, 0xce, 0x8b, 0xb1, 0x84, 0xa7, + 0x23, 0x43, 0x17, 0xfa, 0x8b, 0x0c, 0x8d, 0xb6, 0x60, 0x6a, 0x6f, 0x8b, 0xf8, 0xb7, 0xfd, 0xc8, + 0x89, 0xbd, 0x68, 0xc3, 0x63, 0x19, 0xdb, 0xf9, 0xba, 0x79, 0x45, 0x1a, 0xe6, 0xde, 0x4d, 0x23, + 0x1c, 0x1e, 0xcc, 0x5e, 0x30, 0x0a, 0x92, 0x2e, 0xf3, 0x83, 0x04, 0x77, 0x12, 0xed, 0x0c, 0xac, + 0x3d, 0xf0, 0x10, 0x03, 0x6b, 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xe2, 0x3b, 0x74, 0x05, 0x4a, + 0x4e, 0xcb, 0xe3, 0x82, 0x53, 0x71, 0x8c, 0x32, 0x01, 0x40, 0xad, 0xca, 0xc5, 0xa6, 0x0a, 0xaa, + 0x12, 0xf2, 0x16, 0x72, 0x13, 0xf2, 0xf6, 0xcc, 0xaf, 0x6b, 0x7f, 0x8f, 0x05, 0xc2, 0x91, 0xae, + 0x8f, 0xb3, 0xfb, 0x2d, 0x99, 0xcf, 0xdc, 0x48, 0xbe, 0x71, 0x29, 0xdf, 0xb3, 0x50, 0xa4, 0xdc, + 0x50, 0xbc, 0x92, 0x91, 0x68, 0xc3, 0xa0, 0x65, 0xbb, 0x20, 0xa0, 0x15, 0xc2, 0xc4, 0x8e, 0xbd, + 0x7b, 0xf3, 0x1c, 0x80, 0xcb, 0x70, 0xb5, 0xac, 0xc6, 0xea, 0x66, 0xae, 0x28, 0x08, 0xd6, 0xb0, + 0xec, 0x7f, 0x5d, 0x80, 0x11, 0x99, 0xec, 0xa1, 0xed, 0xf7, 0x23, 0x1c, 0x38, 0x52, 0xf6, 0x37, + 0x96, 0x06, 0x9c, 0x12, 0xae, 0x25, 0x32, 0x95, 0x24, 0x0d, 0xb8, 0x04, 0xe0, 0x04, 0x87, 0xee, + 0xa2, 0xa8, 0xbd, 0xce, 0xd0, 0x53, 0x6e, 0x5f, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x7d, 0x1a, 0x26, + 0x79, 0xbd, 0x30, 0x68, 0x39, 0x9b, 0x5c, 0x22, 0x3d, 0xa8, 0xfc, 0xb5, 0x27, 0x57, 0x52, 0xb0, + 0xc3, 0x83, 0xd9, 0xd3, 0xe9, 0x32, 0xa6, 0x6a, 0xe9, 0xa0, 0xc2, 0xcc, 0x37, 0x78, 0x23, 0x74, + 0xf7, 0x77, 0x58, 0x7d, 0x24, 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x07, 0xd4, 0x99, 0xf6, 0x02, 0xbd, + 0xc1, 0x6d, 0xf6, 0xbc, 0x90, 0xb8, 0xdd, 0x54, 0x2f, 0xba, 0x57, 0xb2, 0xf4, 0xd8, 0xe0, 0xb5, + 0xb0, 0xaa, 0x6f, 0xff, 0xff, 0x45, 0x98, 0x4c, 0xfb, 0xa8, 0xa2, 0xeb, 0x30, 0xc4, 0x59, 0x0f, + 0x41, 0xbe, 0x8b, 0x66, 0x5f, 0xf3, 0x6c, 0x65, 0x87, 0xb0, 0xe0, 0x5e, 0x44, 0x7d, 0xf4, 0x36, + 0x8c, 0xb8, 0xc1, 0x9e, 0xbf, 0xe7, 0x84, 0xee, 0x7c, 0xad, 0x2a, 0x96, 0x73, 0xe6, 0x6b, 0xa9, + 0x92, 0xa0, 0xe9, 0xde, 0xb2, 0x4c, 0x8b, 0x95, 0x80, 0xb0, 0x4e, 0x0e, 0xad, 0xb1, 0x28, 0xbd, + 0x1b, 0xde, 0xe6, 0x8a, 0xd3, 0xea, 0x66, 0xc0, 0xbd, 0x28, 0x91, 0x34, 0xca, 0x63, 0x22, 0x94, + 0x2f, 0x07, 0xe0, 0x84, 0x10, 0xfa, 0x36, 0x38, 0x15, 0xe5, 0x08, 0x58, 0xf3, 0xb2, 0x20, 0x75, + 0x93, 0x39, 0x2e, 0x3c, 0x42, 0xdf, 0xb1, 0x59, 0xa2, 0xd8, 0xac, 0x66, 0xec, 0x5f, 0x3b, 0x05, + 0xc6, 0x26, 0x36, 0x92, 0xe2, 0x59, 0xc7, 0x94, 0x14, 0x0f, 0x43, 0x89, 0xec, 0xb4, 0xe2, 0xfd, + 
0x8a, 0x17, 0x76, 0xcb, 0xaa, 0xba, 0x24, 0x70, 0x3a, 0x69, 0x4a, 0x08, 0x56, 0x74, 0xb2, 0x33, + 0x17, 0x16, 0xbf, 0x8e, 0x99, 0x0b, 0x07, 0x4e, 0x30, 0x73, 0xe1, 0x2a, 0x0c, 0x6f, 0x7a, 0x31, + 0x26, 0xad, 0x40, 0x30, 0xfd, 0x99, 0xeb, 0xf0, 0x1a, 0x47, 0xe9, 0xcc, 0x91, 0x25, 0x00, 0x58, + 0x12, 0x41, 0x6f, 0xa8, 0x1d, 0x38, 0x94, 0xff, 0x66, 0xee, 0x54, 0x41, 0x67, 0xee, 0x41, 0x91, + 0x9f, 0x70, 0xf8, 0x41, 0xf3, 0x13, 0x2e, 0xcb, 0xac, 0x82, 0xa5, 0x7c, 0x6f, 0x0b, 0x96, 0x34, + 0xb0, 0x47, 0x2e, 0xc1, 0x3b, 0x7a, 0x26, 0xc6, 0x72, 0xfe, 0x49, 0xa0, 0x92, 0x2c, 0xf6, 0x99, + 0x7f, 0xf1, 0x7b, 0x2c, 0x38, 0xd3, 0xca, 0x4a, 0x4a, 0x2a, 0xb4, 0xb5, 0x2f, 0xf6, 0x9d, 0x75, + 0xd5, 0x68, 0x90, 0x09, 0x6a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, 0xa0, 0xc3, 0x75, 0x57, 0x24, + 0x10, 0x7c, 0x3c, 0x27, 0x91, 0x63, 0x97, 0xf4, 0x8d, 0x6b, 0x19, 0x49, 0x03, 0x3f, 0x9a, 0x97, + 0x34, 0xb0, 0xef, 0x54, 0x81, 0x6f, 0xa8, 0x14, 0x8e, 0x63, 0xf9, 0x4b, 0x89, 0x27, 0x68, 0xec, + 0x99, 0xb8, 0xf1, 0x0d, 0x95, 0xb8, 0xb1, 0x4b, 0x1c, 0x49, 0x9e, 0x96, 0xb1, 0x67, 0xba, 0x46, + 0x2d, 0xe5, 0xe2, 0xc4, 0xf1, 0xa4, 0x5c, 0x34, 0xae, 0x1a, 0x9e, 0xf5, 0xef, 0xa9, 0x1e, 0x57, + 0x8d, 0x41, 0xb7, 0xfb, 0x65, 0xc3, 0xd3, 0x4b, 0x4e, 0x3d, 0x50, 0x7a, 0xc9, 0x3b, 0x7a, 0xba, + 0x46, 0xd4, 0x23, 0x1f, 0x21, 0x45, 0xea, 0x33, 0x49, 0xe3, 0x1d, 0xfd, 0x02, 0x3c, 0x95, 0x4f, + 0x57, 0xdd, 0x73, 0x9d, 0x74, 0x33, 0xaf, 0xc0, 0x8e, 0xe4, 0x8f, 0xa7, 0x4f, 0x26, 0xf9, 0xe3, + 0x99, 0x63, 0x4f, 0xfe, 0x78, 0xf6, 0x04, 0x92, 0x3f, 0x3e, 0x72, 0x82, 0xc9, 0x1f, 0xef, 0x30, + 0x13, 0x07, 0x1e, 0x8e, 0x44, 0xc4, 0xbd, 0xcc, 0x8e, 0xb1, 0x98, 0x15, 0xb3, 0x84, 0x7f, 0x9c, + 0x02, 0xe1, 0x84, 0x54, 0x46, 0x52, 0xc9, 0xe9, 0x87, 0x90, 0x54, 0x72, 0x35, 0x49, 0x2a, 0x79, + 0x2e, 0x7f, 0xaa, 0x33, 0x4c, 0xcb, 0x73, 0x52, 0x49, 0xde, 0xd1, 0x53, 0x40, 0x3e, 0xda, 0x45, + 0x14, 0x9f, 0x25, 0x78, 0xec, 0x92, 0xf8, 0xf1, 0x75, 0x9e, 0xf8, 0xf1, 0x7c, 0xfe, 0x49, 0x9e, + 0xbe, 0xee, 0xcc, 0x74, 0x8f, 0xdf, 0x57, 0x80, 0x8b, 0xdd, 0xf7, 0x45, 0x22, 0xf5, 0xac, 0x25, + 0x1a, 0xc1, 0x94, 0xd4, 0x93, 0xbf, 0xad, 0x12, 0xac, 0xbe, 0x23, 0x55, 0x5d, 0x83, 0x29, 0x65, + 0x3b, 0xde, 0xf4, 0x1a, 0xfb, 0x5a, 0x86, 0x7b, 0xe5, 0x6f, 0x5b, 0x4f, 0x23, 0xe0, 0xce, 0x3a, + 0x68, 0x1e, 0x26, 0x8c, 0xc2, 0x6a, 0x45, 0xbc, 0xa1, 0x94, 0x98, 0xb5, 0x6e, 0x82, 0x71, 0x1a, + 0xdf, 0xfe, 0x69, 0x0b, 0x1e, 0xc9, 0xc9, 0xab, 0xd4, 0x77, 0x20, 0xa6, 0x0d, 0x98, 0x68, 0x99, + 0x55, 0x7b, 0xc4, 0x6b, 0x33, 0xb2, 0x37, 0xa9, 0xbe, 0xa6, 0x00, 0x38, 0x4d, 0xd4, 0xfe, 0x53, + 0x0b, 0x2e, 0x74, 0x35, 0xe3, 0x42, 0x18, 0xce, 0x6e, 0xee, 0x44, 0xce, 0x62, 0x48, 0x5c, 0xe2, + 0xc7, 0x9e, 0xd3, 0xac, 0xb7, 0x48, 0x43, 0x93, 0x5b, 0x33, 0x7b, 0xa8, 0x6b, 0x2b, 0xf5, 0xf9, + 0x4e, 0x0c, 0x9c, 0x53, 0x13, 0x2d, 0x03, 0xea, 0x84, 0x88, 0x19, 0x66, 0x31, 0x5d, 0x3b, 0xe9, + 0xe1, 0x8c, 0x1a, 0xe8, 0x25, 0x18, 0x53, 0xe6, 0x61, 0xda, 0x8c, 0xb3, 0x03, 0x18, 0xeb, 0x00, + 0x6c, 0xe2, 0x2d, 0x5c, 0xf9, 0x8d, 0xdf, 0xbb, 0xf8, 0x91, 0xdf, 0xfa, 0xbd, 0x8b, 0x1f, 0xf9, + 0xed, 0xdf, 0xbb, 0xf8, 0x91, 0xef, 0xb8, 0x7f, 0xd1, 0xfa, 0x8d, 0xfb, 0x17, 0xad, 0xdf, 0xba, + 0x7f, 0xd1, 0xfa, 0xed, 0xfb, 0x17, 0xad, 0xdf, 0xbd, 0x7f, 0xd1, 0xfa, 0xd2, 0xef, 0x5f, 0xfc, + 0xc8, 0x5b, 0x85, 0xdd, 0x67, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x40, 0x10, 0x5c, + 0xb3, 0xfc, 0x00, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -12425,9 +12429,9 @@ func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) i-- dAtA[i] = 0x1a - i -= len(m.DoNotUse_ExternalID) - copy(dAtA[i:], m.DoNotUse_ExternalID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) + i -= len(m.DoNotUseExternalID) + copy(dAtA[i:], m.DoNotUseExternalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUseExternalID))) i-- dAtA[i] = 0x12 i -= len(m.PodCIDR) @@ -14323,6 +14327,14 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.InsecureSkipTLSVerifyBackend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 if m.LimitBytes != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) i-- @@ -17640,6 +17652,17 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } if m.IPFamily != nil { i -= len(*m.IPFamily) copy(dAtA[i:], *m.IPFamily) @@ -21055,7 +21078,7 @@ func (m *NodeSpec) Size() (n int) { _ = l l = len(m.PodCIDR) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DoNotUse_ExternalID) + l = len(m.DoNotUseExternalID) n += 1 + l + sovGenerated(uint64(l)) l = len(m.ProviderID) n += 1 + l + sovGenerated(uint64(l)) @@ -21777,6 +21800,7 @@ func (m *PodLogOptions) Size() (n int) { if m.LimitBytes != nil { n += 1 + sovGenerated(uint64(*m.LimitBytes)) } + n += 2 return n } @@ -23005,6 +23029,12 @@ func (m *ServiceSpec) Size() (n int) { l = len(*m.IPFamily) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -25045,7 +25075,7 @@ func (this *NodeSpec) String() string { repeatedStringForTaints += "}" s := strings.Join([]string{`&NodeSpec{`, `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, + `DoNotUseExternalID:` + fmt.Sprintf("%v", this.DoNotUseExternalID) + `,`, `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, `Taints:` + repeatedStringForTaints + `,`, @@ -25546,6 +25576,7 @@ func (this *PodLogOptions) String() string { `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, `}`, }, "") return s @@ -26525,6 +26556,7 @@ func (this *ServiceSpec) String() string { `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, `}`, }, "") return s @@ -44590,7 +44622,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DoNotUse_ExternalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DoNotUseExternalID", wireType) } var stringLen uint64 for shift 
:= uint(0); ; shift += 7 { @@ -44618,7 +44650,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DoNotUse_ExternalID = string(dAtA[iNdEx:postIndex]) + m.DoNotUseExternalID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -51048,6 +51080,26 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } } m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerifyBackend = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62214,6 +62266,38 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { s := IPFamily(dAtA[iNdEx:postIndex]) m.IPFamily = &s iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto index b99d1044277..c05e2351004 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto @@ -1142,7 +1142,7 @@ message EnvVar { // EnvVarSource represents a source for the value of an EnvVar. message EnvVarSource { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. // +optional optional ObjectFieldSelector fieldRef = 1; @@ -3145,6 +3145,15 @@ message PodLogOptions { // slightly more or slightly less than the specified limit. // +optional optional int64 limitBytes = 8; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). 
+ // +optional + optional bool insecureSkipTLSVerifyBackend = 9; } // PodPortForwardOptions is the query options to a Pod's port forward call @@ -3375,7 +3384,6 @@ message PodSpec { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional optional bool shareProcessNamespace = 27; @@ -4733,6 +4741,21 @@ message ServiceSpec { // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. // +optional optional string ipFamily = 15; + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + repeated string topologyKeys = 16; } // ServiceStatus represents the current status of a service. @@ -5032,7 +5055,6 @@ message VolumeMount { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is beta in 1.15. // +optional optional string subPathExpr = 6; } @@ -5249,7 +5271,7 @@ message WindowsSecurityContextOptions { // Defaults to the user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. // +optional optional string runAsUserName = 3; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index fcd45540290..47a40271e02 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -30,6 +30,8 @@ const ( NamespaceAll string = "" // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) NamespaceNodeLease string = "kube-node-lease" + // TopologyKeyAny is the service topology key that matches any node + TopologyKeyAny string = "*" ) // Volume represents a named volume in a pod that may be accessed by any container in the pod. @@ -1784,7 +1786,6 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is beta in 1.15. 
// +optional SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } @@ -1847,7 +1848,7 @@ type EnvVar struct { // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests @@ -2941,7 +2942,6 @@ type PodSpec struct { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` @@ -3828,6 +3828,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 ) // ServiceSpec describes the attributes that a user creates on a service. @@ -3942,6 +3944,7 @@ type ServiceSpec struct { // of peer discovery. // +optional PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` + // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` @@ -3955,6 +3958,21 @@ type ServiceSpec struct { // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. // +optional IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` } // ServicePort contains information on service's port. @@ -4233,7 +4251,7 @@ type NodeSpec struct { // Deprecated. Not all kubelets will set this field. Remove field after 1.13. 
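Editor's note on the topologyKeys addition above: the field is easiest to read alongside a concrete object. The sketch below is illustrative only and is not part of this patch; the Service name, namespace and selector are made up, and the cluster would need the ServiceTopology feature gate enabled for the field to have any effect. It builds a Service whose endpoints are preferred on the same node, then the same zone, then anywhere, using the new TopologyKeyAny ("*") catch-all and the stable zone label that this patch also introduces.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        // Preference order: same node, same zone, then any topology ("*").
        // The list is capped at corev1.MaxServiceTopologyKeys (16) entries,
        // and "*" only makes sense as the last entry.
        svc := corev1.Service{
            ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
            Spec: corev1.ServiceSpec{
                Selector: map[string]string{"app": "web"},
                Ports:    []corev1.ServicePort{{Name: "http", Port: 80}},
                TopologyKeys: []string{
                    corev1.LabelHostname,                // "kubernetes.io/hostname"
                    corev1.LabelZoneFailureDomainStable, // "topology.kubernetes.io/zone", added in this patch
                    corev1.TopologyKeyAny,               // "*", catch-all, must be last
                },
            },
        }
        fmt.Println(svc.Spec.TopologyKeys)
    }

As the field comment notes, topologyKeys cannot be combined with externalTrafficPolicy=Local.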
// see: https://issues.k8s.io/61966 // +optional - DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` } // NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. @@ -4660,6 +4678,12 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) +const ( + // NamespaceTerminatingCause is returned as a defaults.cause item when a change is + // forbidden due to the namespace being terminated. + NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating" +) + type NamespaceConditionType string // These are valid conditions of a namespace. @@ -4670,6 +4694,10 @@ const ( NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" + // NamespaceContentRemaining contains information about resources remaining in a namespace. + NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining" + // NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace. + NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining" ) // NamespaceCondition contains details about state of namespace. @@ -4765,6 +4793,7 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodLogOptions is the query options for a Pod's logs REST call. @@ -4805,8 +4834,18 @@ type PodLogOptions struct { // slightly more or slightly less than the specified limit. // +optional LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodAttachOptions is the query options to a Pod's remote attach call. @@ -4844,6 +4883,7 @@ type PodAttachOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodExecOptions is the query options to a Pod's remote exec call. 
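For context on the new insecureSkipTLSVerifyBackend option, here is an illustrative client-go sketch; it is not part of this patch, the pod name, namespace and container are placeholders, and the Stream() call matches the client-go release contemporaneous with this vendor drop (later releases take a context.Context).

    package main

    import (
        "fmt"
        "io/ioutil"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }

        // Ask the apiserver to stream logs without verifying the kubelet's
        // serving certificate; the returned log data is then unauthenticated.
        opts := &corev1.PodLogOptions{
            Container:                    "app",
            InsecureSkipTLSVerifyBackend: true,
        }
        stream, err := cs.CoreV1().Pods("default").GetLogs("my-pod", opts).Stream()
        if err != nil {
            panic(err)
        }
        defer stream.Close()

        data, _ := ioutil.ReadAll(stream)
        fmt.Print(string(data))
    }

The trade-off is exactly the one described in the field comment: the HTTPS hop from apiserver to kubelet is no longer verified, so the logs cannot be attributed to the real kubelet.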
@@ -4882,6 +4922,7 @@ type PodExecOptions struct { Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodPortForwardOptions is the query options to a Pod's port forward call @@ -4899,6 +4940,7 @@ type PodPortForwardOptions struct { Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodProxyOptions is the query options to a Pod's proxy call. @@ -4910,6 +4952,7 @@ type PodProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NodeProxyOptions is the query options to a Node's proxy call. @@ -4921,6 +4964,7 @@ type NodeProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceProxyOptions is the query options to a Service's proxy call. @@ -5764,7 +5808,7 @@ type WindowsSecurityContextOptions struct { // Defaults to the user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. // +optional RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 35b8389a750..441d3e1088e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -566,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string { var map_EnvVarSource = map[string]string{ "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", @@ -1528,15 +1528,16 @@ func (PodList) SwaggerDoc() map[string]string { } var map_PodLogOptions = map[string]string{ - "": "PodLogOptions is the query options for a Pod's logs REST call.", - "container": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.", - "follow": "Follow the log stream of the pod. Defaults to false.", - "previous": "Return previous terminated container logs. Defaults to false.", - "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. 
an attacker could not intercept the actual log data coming from the real kubelet).", } func (PodLogOptions) SwaggerDoc() map[string]string { @@ -1613,7 +1614,7 @@ var map_PodSpec = map[string]string{ "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", - "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", + "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", @@ -2203,6 +2204,7 @@ var map_ServiceSpec = map[string]string{ "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. 
IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2366,7 +2368,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2456,7 +2458,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", - "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_labels.go index 3287fb51fc2..22aa55b9111 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -17,15 +17,23 @@ limitations under the License. package v1 const ( - LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + LabelHostname = "kubernetes.io/hostname" - LabelInstanceType = "beta.kubernetes.io/instance-type" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" + LabelZoneRegionStable = "topology.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + LabelInstanceTypeStable = "node.kubernetes.io/instance-type" LabelOSStable = "kubernetes.io/os" LabelArchStable = "kubernetes.io/arch" + // LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0. + // It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763) + LabelWindowsBuild = "node.kubernetes.io/windows-build" + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_taints.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_taints.go new file mode 100644 index 00000000000..e390519280f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/well_known_taints.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // TaintNodeNotReady will be added when node is not ready + // and feature-gate for TaintBasedEvictions flag is enabled, + // and removed when node becomes ready. + TaintNodeNotReady = "node.kubernetes.io/not-ready" + + // TaintNodeUnreachable will be added when node becomes unreachable + // (corresponding to NodeReady status ConditionUnknown) + // and feature-gate for TaintBasedEvictions flag is enabled, + // and removed when node becomes reachable (NodeReady status ConditionTrue). 
+ TaintNodeUnreachable = "node.kubernetes.io/unreachable" + + // TaintNodeUnschedulable will be added when node becomes unschedulable + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node becomes scheduable. + TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" + + // TaintNodeMemoryPressure will be added when node has memory pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough memory. + TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" + + // TaintNodeDiskPressure will be added when node has disk pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough disk. + TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" + + // TaintNodeNetworkUnavailable will be added when node's network is unavailable + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when network becomes ready. + TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" + + // TaintNodePIDPressure will be added when node has pid pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough disk. + TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index fd47019c034..ac4855abc41 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -5186,6 +5186,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(IPFamily) **out = **in } + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/BUILD index cc76727f871..283479e10c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/BUILD @@ -8,6 +8,7 @@ go_library( "register.go", "types.go", "types_swagger_doc_generated.go", + "well_known_labels.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1", diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go index 34f6b28d71f..fa4d3ac5042 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -200,53 +200,54 @@ func init() { } var fileDescriptor_772f83c5b34e07a5 = []byte{ - // 728 bytes of a gzipped FileDescriptorProto + // 746 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, - 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2, - 0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e, - 0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20, - 0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57, - 0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 
0x83, - 0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22, - 0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26, - 0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff, - 0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43, - 0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94, - 0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99, - 0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1, - 0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46, - 0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77, - 0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26, - 0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4, - 0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1, - 0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58, - 0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c, - 0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7, - 0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88, - 0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, - 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51, - 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a, - 0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7, - 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa, - 0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80, - 0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61, - 0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d, - 0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f, - 0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca, - 0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68, - 0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2, - 0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c, - 0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13, - 0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5, - 0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25, - 0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37, - 0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e, - 0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59, - 0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d, - 0xf1, 0x44, 0x48, 
0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f, - 0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf, - 0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00, + 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca, + 0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba, + 0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0, + 0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a, + 0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10, + 0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64, + 0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24, + 0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97, + 0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88, + 0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca, + 0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c, + 0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58, + 0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23, + 0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb, + 0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13, + 0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2, + 0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68, + 0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c, + 0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e, + 0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3, + 0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88, + 0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, + 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51, + 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a, + 0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7, + 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55, + 0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00, + 0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3, + 0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a, + 0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37, + 0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2, + 0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31, + 0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 
0x3b, + 0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3, + 0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35, + 0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3, + 0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79, + 0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31, + 0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f, + 0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5, + 0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b, + 0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96, + 0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, + 0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, + 0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -387,6 +388,13 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } if m.Port != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) i-- @@ -429,13 +437,11 @@ func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.AddressType != nil { - i -= len(*m.AddressType) - copy(dAtA[i:], *m.AddressType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType))) - i-- - dAtA[i] = 0x22 - } + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 if len(m.Ports) > 0 { for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { { @@ -597,6 +603,10 @@ func (m *EndpointPort) Size() (n int) { if m.Port != nil { n += 1 + sovGenerated(uint64(*m.Port)) } + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -620,10 +630,8 @@ func (m *EndpointSlice) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.AddressType != nil { - l = len(*m.AddressType) - n += 1 + l + sovGenerated(uint64(l)) - } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -692,6 +700,7 @@ func (this *EndpointPort) String() string { `Name:` + valueToStringGenerated(this.Name) + `,`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, `Port:` + valueToStringGenerated(this.Port) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, `}`, }, "") return s @@ -714,7 +723,7 @@ func (this *EndpointSlice) String() string { `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, `Endpoints:` + repeatedStringForEndpoints + `,`, `Ports:` + repeatedStringForPorts + `,`, - `AddressType:` + valueToStringGenerated(this.AddressType) + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, `}`, }, "") return s @@ -1246,6 +1255,39 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } } m.Port = &v + case 
4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1430,8 +1472,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := AddressType(dAtA[iNdEx:postIndex]) - m.AddressType = &s + m.AddressType = AddressType(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.proto index f14f37fd046..62074e7a7b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -32,11 +32,10 @@ option go_package = "v1alpha1"; // Endpoint represents a single logical "backend" implementing a service. message Endpoint { // addresses of this endpoint. The contents of this field are interpreted - // according to the corresponding EndpointSlice addressType field. This - // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers - // (e.g. kube-proxy) must handle different types of addresses in the context - // of their own capabilities. This must contain at least one address but no - // more than 100. + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. // +listType=set repeated string addresses = 1; @@ -87,11 +86,10 @@ message EndpointPort { // The name of this port. All ports in an EndpointSlice must have a unique // name. If the EndpointSlice is dervied from a Kubernetes service, this // corresponds to the Service.ports[].name. - // Name must either be an empty string or pass IANA_SVC_NAME validation: - // * must be no more than 15 characters long - // * may contain only [-a-z0-9] - // * must contain at least one letter [a-z] - // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. // Default is empty string. optional string name = 1; @@ -104,6 +102,14 @@ message EndpointPort { // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. 
+ // Default is empty string. + optional string appProtocol = 4; } // EndpointSlice represents a subset of the endpoints that implement a service. @@ -115,9 +121,12 @@ message EndpointSlice { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. - // All addresses in this slice must be the same type. - // Default is IP - // +optional + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. optional string addressType = 4; // endpoints is a list of unique endpoints in this slice. Each slice may diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types.go index 0de6082aeef..fff30b5c7a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -33,10 +33,13 @@ type EndpointSlice struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // addressType specifies the type of address carried by this EndpointSlice. - // All addresses in this slice must be the same type. - // Default is IP - // +optional - AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic @@ -56,17 +59,26 @@ type AddressType string const ( // AddressTypeIP represents an IP Address. + // This address type has been deprecated and has been replaced by the IPv4 + // and IPv6 adddress types. New resources with this address type will be + // considered invalid. This will be fully removed in 1.18. + // +deprecated AddressTypeIP = AddressType("IP") + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") ) // Endpoint represents a single logical "backend" implementing a service. type Endpoint struct { // addresses of this endpoint. The contents of this field are interpreted - // according to the corresponding EndpointSlice addressType field. This - // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers - // (e.g. kube-proxy) must handle different types of addresses in the context - // of their own capabilities. This must contain at least one address but no - // more than 100. + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. 
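Because the discovery/v1alpha1 changes above make addressType a required, non-pointer field and add appProtocol, a short sketch may help. It is illustrative only and not part of this patch; the object name, service name and address are placeholders. It also sets the kubernetes.io/service-name label that this patch introduces in well_known_labels.go further below.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        discovery "k8s.io/api/discovery/v1alpha1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        name := "http"
        protocol := corev1.ProtocolTCP
        port := int32(8080)
        appProtocol := "http" // new field: un-prefixed names are IANA service names
        ready := true

        slice := discovery.EndpointSlice{
            ObjectMeta: metav1.ObjectMeta{
                Name:   "web-abc12",
                Labels: map[string]string{discovery.LabelServiceName: "web"},
            },
            // addressType is now required and non-optional; "IP" is deprecated
            // in favour of IPv4, IPv6 and FQDN.
            AddressType: discovery.AddressTypeIPv4,
            Endpoints: []discovery.Endpoint{{
                Addresses:  []string{"10.0.0.12"},
                Conditions: discovery.EndpointConditions{Ready: &ready},
            }},
            Ports: []discovery.EndpointPort{{
                Name:        &name,
                Protocol:    &protocol,
                Port:        &port,
                AppProtocol: &appProtocol,
            }},
        }
        fmt.Println(slice.AddressType, *slice.Ports[0].AppProtocol)
    }
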
// +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` // conditions contains information about the current status of the endpoint. @@ -113,11 +125,10 @@ type EndpointPort struct { // The name of this port. All ports in an EndpointSlice must have a unique // name. If the EndpointSlice is dervied from a Kubernetes service, this // corresponds to the Service.ports[].name. - // Name must either be an empty string or pass IANA_SVC_NAME validation: - // * must be no more than 15 characters long - // * may contain only [-a-z0-9] - // * must contain at least one letter [a-z] - // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` // The IP protocol for this port. @@ -128,6 +139,13 @@ type EndpointPort struct { // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go index a524bcd68e5..1ba2d60d4a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", - "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). 
Must pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", @@ -50,10 +50,11 @@ func (EndpointConditions) SwaggerDoc() map[string]string { } var map_EndpointPort = map[string]string{ - "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -63,7 +64,7 @@ func (EndpointPort) SwaggerDoc() map[string]string { var map_EndpointSlice = map[string]string{ "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", "metadata": "Standard object's metadata.", - "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.", "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". 
Each slice may include a maximum of 100 ports.", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go new file mode 100644 index 00000000000..8f9c72f088e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. + LabelManagedBy = "endpointslice.kubernetes.io/managed-by" +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go index e424fccf3b3..c72f64acfd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -103,6 +103,11 @@ func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { *out = new(int32) **out = **in } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } return } @@ -121,11 +126,6 @@ func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.AddressType != nil { - in, out := &in.AddressType, &out.AddressType - *out = new(AddressType) - **out = **in - } if in.Endpoints != nil { in, out := &in.Endpoints, &out.Endpoints *out = make([]Endpoint, len(*in)) diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/BUILD new file mode 100644 index 00000000000..b6a9064a62b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "well_known_labels.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1", + importpath = "k8s.io/api/discovery/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + 
"//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/doc.go similarity index 73% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go rename to cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/doc.go index a4a1c035fc8..9b54d1b94ce 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +groupName=discovery.k8s.io -// Package version supplies version information collected at build time to -// kubernetes components. -package version // import "k8s.io/kubernetes/pkg/version" +package v1beta1 // import "k8s.io/api/discovery/v1beta1" diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go new file mode 100644 index 00000000000..2283d12d658 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -0,0 +1,1730 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{3} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_ece80bbc872d519b, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1beta1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1beta1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1beta1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_ece80bbc872d519b) +} + +var fileDescriptor_ece80bbc872d519b = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x48, + 0x14, 0xb6, 0xe2, 0x88, 0x95, 0xc6, 0x31, 0x9b, 0x0c, 0x7b, 0x30, 0xde, 0x20, 0x19, 0x2f, 0x2c, + 0x66, 0x43, 0xa4, 0x75, 0xc8, 0x2e, 0x61, 0xf7, 0x14, 0xed, 0x86, 0xb6, 0xd0, 0x36, 0x66, 0x1a, + 0x28, 0x94, 0x1e, 0x3a, 0x96, 0x26, 0xb2, 0x6a, 0x5b, 0x23, 0x34, 0x63, 0x83, 0x6f, 0xfd, 0x13, + 0xfa, 0xf7, 0xf4, 0x5a, 0x28, 0x39, 0xe6, 0x98, 0x93, 0xa8, 0xd5, 0xff, 0x22, 0xa7, 0x32, 0xa3, + 0x5f, 0x76, 0xdd, 0x1f, 0xbe, 0xcd, 0x7c, 0xf3, 0xbe, 0xef, 0xbd, 0xf7, 0xcd, 0x7b, 0xe0, 0x62, + 0x7c, 0xc6, 0xac, 0x80, 0xda, 0xe3, 0xd9, 0x90, 0xc4, 0x21, 0xe1, 0x84, 0xd9, 0x73, 0x12, 0x7a, + 0x34, 0xb6, 0xf3, 0x07, 0x1c, 0x05, 0xb6, 0x17, 0x30, 0x97, 0xce, 0x49, 0xbc, 0xb0, 0xe7, 0xfd, + 0x21, 0xe1, 0xb8, 0x6f, 0xfb, 0x24, 0x24, 0x31, 0xe6, 0xc4, 0xb3, 0xa2, 0x98, 0x72, 0x0a, 0x0f, + 0xb3, 0x68, 0x0b, 0x47, 0x81, 0x55, 0x46, 0x5b, 0x79, 0x74, 0xfb, 0xd8, 0x0f, 0xf8, 0x68, 0x36, + 0xb4, 0x5c, 0x3a, 0xb5, 0x7d, 0xea, 0x53, 0x5b, 0x92, 0x86, 0xb3, 0x6b, 0x79, 0x93, 0x17, 0x79, + 0xca, 0xc4, 0xda, 0xdd, 0x95, 0xd4, 0x2e, 0x8d, 0x89, 0x3d, 0xdf, 0x48, 0xd8, 0x3e, 0xad, 0x62, + 0xa6, 0xd8, 0x1d, 0x05, 0xa1, 0xa8, 0x2e, 0x1a, 0xfb, 0x02, 0x60, 0xf6, 0x94, 0x70, 0xfc, 0x35, + 0x96, 0xfd, 0x2d, 0x56, 0x3c, 0x0b, 0x79, 0x30, 0x25, 0x1b, 0x84, 0xbf, 0x7f, 0x44, 0x60, 0xee, + 0x88, 0x4c, 0xf1, 0x97, 0xbc, 0xee, 0xbb, 0x3a, 0xd0, 0x2e, 0x42, 0x2f, 0xa2, 0x41, 0xc8, 0xe1, + 0x11, 0xd0, 0xb1, 0xe7, 0xc5, 0x84, 0x31, 0xc2, 0x5a, 0x4a, 0xa7, 0xde, 0xd3, 0x9d, 0x66, 0x9a, + 0x98, 0xfa, 0x79, 0x01, 0xa2, 0xea, 0x1d, 0x7a, 0x00, 0xb8, 0x34, 0xf4, 0x02, 0x1e, 0xd0, 0x90, + 0xb5, 0x76, 0x3a, 0x4a, 0xaf, 0x71, 0xf2, 0xa7, 0xf5, 0x3d, 0x7b, 0xad, 0x22, 0xd1, 0x7f, 0x25, + 0xcf, 0x81, 0x37, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5a, 0xd1, 0x85, 0x3d, 0xa0, 0x8d, + 0x28, 0xe3, 0x21, 0x9e, 0x92, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x67, 0x2f, 0x4d, 0x4c, 0xed, 0x61, + 0x8e, 0xa1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x71, 0xec, 0x13, 
0x8e, 0xc8, 0x75, 0x6b, 0x57, 0x96, + 0xf3, 0xdb, 0x6a, 0x39, 0xe2, 0x83, 0xac, 0x79, 0xdf, 0xba, 0x1c, 0xbe, 0x26, 0xae, 0x08, 0x22, + 0x31, 0x09, 0x5d, 0x92, 0x75, 0x78, 0x55, 0x30, 0x51, 0x25, 0x02, 0x87, 0x40, 0xe3, 0x34, 0xa2, + 0x13, 0xea, 0x2f, 0x5a, 0x6a, 0xa7, 0xde, 0x6b, 0x9c, 0x9c, 0x6e, 0xd7, 0x9f, 0x75, 0x95, 0xd3, + 0x2e, 0x42, 0x1e, 0x2f, 0x9c, 0xfd, 0xbc, 0x47, 0xad, 0x80, 0x51, 0xa9, 0xdb, 0xfe, 0x17, 0x34, + 0xd7, 0x82, 0xe1, 0x3e, 0xa8, 0x8f, 0xc9, 0xa2, 0xa5, 0x88, 0x5e, 0x91, 0x38, 0xc2, 0x5f, 0x80, + 0x3a, 0xc7, 0x93, 0x19, 0x91, 0x1e, 0xeb, 0x28, 0xbb, 0xfc, 0xb3, 0x73, 0xa6, 0x74, 0xff, 0x02, + 0x70, 0xd3, 0x52, 0x68, 0x02, 0x35, 0x26, 0xd8, 0xcb, 0x34, 0x34, 0x47, 0x4f, 0x13, 0x53, 0x45, + 0x02, 0x40, 0x19, 0xde, 0xfd, 0xa0, 0x80, 0xbd, 0x82, 0x37, 0xa0, 0x31, 0x87, 0x87, 0x60, 0x57, + 0x1a, 0x2c, 0x93, 0x3a, 0x5a, 0x9a, 0x98, 0xbb, 0x4f, 0x85, 0xb9, 0x12, 0x85, 0x0f, 0x80, 0x26, + 0x67, 0xc5, 0xa5, 0x93, 0xac, 0x04, 0xe7, 0x48, 0x34, 0x33, 0xc8, 0xb1, 0xfb, 0xc4, 0xfc, 0x75, + 0x73, 0x0f, 0xac, 0xe2, 0x19, 0x95, 0x64, 0x91, 0x26, 0xa2, 0x31, 0x97, 0xff, 0xa8, 0x66, 0x69, + 0x44, 0x7a, 0x24, 0x51, 0xd8, 0x07, 0x0d, 0x1c, 0x45, 0x05, 0x4d, 0xfe, 0xa0, 0xee, 0xfc, 0x9c, + 0x26, 0x66, 0xe3, 0xbc, 0x82, 0xd1, 0x6a, 0x4c, 0x77, 0xb9, 0x03, 0x9a, 0x45, 0x23, 0xcf, 0x26, + 0x81, 0x4b, 0xe0, 0x2b, 0xa0, 0x89, 0x95, 0xf2, 0x30, 0xc7, 0xb2, 0x9b, 0xf5, 0x91, 0x2c, 0x37, + 0xc3, 0x8a, 0xc6, 0xbe, 0x00, 0x98, 0x25, 0xa2, 0xab, 0xa9, 0x78, 0x42, 0x38, 0xae, 0x46, 0xb2, + 0xc2, 0x50, 0xa9, 0x0a, 0xff, 0x07, 0x8d, 0x7c, 0x07, 0xae, 0x16, 0x11, 0xc9, 0xcb, 0xec, 0xe6, + 0x94, 0xc6, 0x79, 0xf5, 0x74, 0xbf, 0x7e, 0x45, 0xab, 0x34, 0xf8, 0x1c, 0xe8, 0x24, 0x2f, 0x5c, + 0xec, 0x8e, 0x98, 0xad, 0xdf, 0xb7, 0x9b, 0x2d, 0xe7, 0x20, 0xcf, 0xa5, 0x17, 0x08, 0x43, 0x95, + 0x16, 0xbc, 0x04, 0xaa, 0x70, 0x93, 0xb5, 0xea, 0x52, 0xf4, 0x8f, 0xed, 0x44, 0xc5, 0x37, 0x38, + 0xcd, 0x5c, 0x58, 0x15, 0x37, 0x86, 0x32, 0x9d, 0xee, 0x7b, 0x05, 0x1c, 0xac, 0x79, 0xfc, 0x38, + 0x60, 0x1c, 0xbe, 0xdc, 0xf0, 0xd9, 0xda, 0xce, 0x67, 0xc1, 0x96, 0x2e, 0x97, 0x4b, 0x51, 0x20, + 0x2b, 0x1e, 0x0f, 0x80, 0x1a, 0x70, 0x32, 0x2d, 0x9c, 0x39, 0xda, 0xae, 0x09, 0x59, 0x5d, 0xd5, + 0xc5, 0x23, 0xa1, 0x80, 0x32, 0x21, 0xe7, 0xf8, 0x66, 0x69, 0xd4, 0x6e, 0x97, 0x46, 0xed, 0x6e, + 0x69, 0xd4, 0xde, 0xa4, 0x86, 0x72, 0x93, 0x1a, 0xca, 0x6d, 0x6a, 0x28, 0x77, 0xa9, 0xa1, 0x7c, + 0x4c, 0x0d, 0xe5, 0xed, 0x27, 0xa3, 0xf6, 0xe2, 0xa7, 0x5c, 0xf2, 0x73, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x29, 0x1a, 0xa2, 0x6f, 0x6d, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for 
_, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if 
err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressType = AddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto new file mode 100644 index 00000000000..cce6f970da8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.discovery.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map<string, string> topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. 
+ // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + optional string appProtocol = 4; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/register.go new file mode 100644 index 00000000000..fc993c5f2d5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go new file mode 100644 index 00000000000..e3dc56539c0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. 
+ // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + // This address type has been deprecated and has been replaced by the IPv4 + // and IPv6 adddress types. New resources with this address type will be + // considered invalid. This will be fully removed in 1.18. + // +deprecated + AddressTypeIP = AddressType("IP") + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. 
+ // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000000..9dd3a03522c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,86 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go new file mode 100644 index 00000000000..b0caa3c6e71 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. 
+ LabelManagedBy = "endpointslice.kubernetes.io/managed-by" +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..8490ec73fb2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. 
+func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go index 8f581ef1d59..eb255341f40 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -985,7 +985,7 @@ type AllowedHostPath struct { // Deprecated: use FSType from policy API Group instead. 
type FSType string -var ( +const ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/BUILD new file mode 100644 index 00000000000..0d89c0d9506 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1", + importpath = "k8s.io/api/flowcontrol/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go new file mode 100644 index 00000000000..31b8b5d5353 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go new file mode 100644 index 00000000000..d44ec3c91a0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -0,0 +1,5459 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return 
fileDescriptor_45ba024d525b289b, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func 
(*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m 
= ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") + 
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) +} + +var fileDescriptor_45ba024d525b289b = []byte{ + // 1502 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x59, 0x27, 0xb0, 0xe0, 0x00, 0x92, 0xc3, 0x17, 0x78, + 0x93, 0xf7, 0x4d, 0x42, 0xc6, 0x69, 0x92, 0xa6, 0x08, 0x8a, 0xc0, 0x74, 0xda, 0x7c, 0xd9, 0xae, + 0xbd, 0x4e, 0x52, 0x34, 0x48, 0x81, 0xd0, 0xd4, 0x5a, 0xda, 0x58, 0x22, 0x59, 0x2e, 0xa9, 0xd4, + 0x45, 0x0e, 0x05, 0xfa, 0x07, 0xfa, 0x03, 0x72, 0xec, 0xa1, 0xe7, 0xfe, 0x82, 0x1e, 0x8d, 0xa2, + 0x87, 0x1c, 0x73, 0x12, 0x62, 0xf5, 0x5a, 0xf4, 0xdc, 0xe6, 0x54, 0xec, 0x72, 0x49, 0x8a, 0xfa, + 0xb0, 0x94, 0x1a, 0xc8, 0xa9, 0x37, 0x71, 0x3e, 0x9e, 0xd9, 0x99, 0x9d, 0x99, 0x7d, 0x04, 0x77, + 0xf6, 0xae, 0x33, 0x8d, 0x3a, 0xfa, 0x5e, 0xb0, 0x43, 0x3c, 0x9b, 0xf8, 0x84, 0xe9, 0x0d, 0x62, + 0x97, 0x1d, 0x4f, 0x97, 0x0a, 0xd3, 0xa5, 0xfa, 0x6e, 0xcd, 0x79, 0x6e, 0x39, 0xb6, 0xef, 0x39, + 0x35, 0xbd, 0xb1, 0x6c, 0xd6, 0xdc, 0xaa, 0xb9, 0xac, 0x57, 0x88, 0x4d, 0x3c, 0xd3, 0x27, 0x65, + 0xcd, 0xf5, 0x1c, 0xdf, 0x41, 0xa5, 0xd0, 0x41, 0x33, 0x5d, 0xaa, 0xb5, 0x39, 0x68, 0x91, 0xc3, + 0xe2, 0xc5, 0x0a, 0xf5, 
0xab, 0xc1, 0x8e, 0x66, 0x39, 0x75, 0xbd, 0xe2, 0x54, 0x1c, 0x5d, 0xf8, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x21, 0xde, 0xe2, 0x95, 0xe4, 0x00, 0x75, 0xd3, + 0xaa, 0x52, 0x9b, 0x78, 0xfb, 0xba, 0xbb, 0x57, 0xe1, 0x02, 0xa6, 0xd7, 0x89, 0x6f, 0xea, 0x8d, + 0xae, 0x53, 0x2c, 0xea, 0xfd, 0xbc, 0xbc, 0xc0, 0xf6, 0x69, 0x9d, 0x74, 0x39, 0x5c, 0x1b, 0xe4, + 0xc0, 0xac, 0x2a, 0xa9, 0x9b, 0x9d, 0x7e, 0xea, 0x63, 0x58, 0xf8, 0xb4, 0xe6, 0x3c, 0xbf, 0x45, + 0x99, 0x4f, 0xed, 0x4a, 0x40, 0x59, 0x95, 0x78, 0xeb, 0xc4, 0xaf, 0x3a, 0x65, 0x74, 0x13, 0xb2, + 0xfe, 0xbe, 0x4b, 0x0a, 0xca, 0x92, 0x72, 0x2e, 0x6f, 0x9c, 0x3f, 0x68, 0x96, 0x46, 0x5a, 0xcd, + 0x52, 0xf6, 0xc1, 0xbe, 0x4b, 0xde, 0x36, 0x4b, 0xa7, 0xfb, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea, + 0xcb, 0x0c, 0x00, 0xb7, 0xda, 0x16, 0xa1, 0xd1, 0x53, 0x98, 0xe0, 0xe9, 0x96, 0x4d, 0xdf, 0x14, + 0x98, 0x93, 0x97, 0x2f, 0x69, 0x49, 0xb1, 0xe3, 0x53, 0x6b, 0xee, 0x5e, 0x85, 0x0b, 0x98, 0xc6, + 0xad, 0xb5, 0xc6, 0xb2, 0xf6, 0xd9, 0xce, 0x33, 0x62, 0xf9, 0xeb, 0xc4, 0x37, 0x0d, 0x24, 0x4f, + 0x01, 0x89, 0x0c, 0xc7, 0xa8, 0x68, 0x0b, 0xb2, 0xcc, 0x25, 0x56, 0x21, 0x23, 0xd0, 0x75, 0x6d, + 0xc0, 0x55, 0x6a, 0xc9, 0xe1, 0xb6, 0x5d, 0x62, 0x19, 0x53, 0x51, 0x8a, 0xfc, 0x0b, 0x0b, 0x28, + 0xf4, 0x05, 0x8c, 0x31, 0xdf, 0xf4, 0x03, 0x56, 0x18, 0x15, 0xa0, 0xcb, 0xef, 0x02, 0x2a, 0x1c, + 0x8d, 0x19, 0x09, 0x3b, 0x16, 0x7e, 0x63, 0x09, 0xa8, 0xbe, 0xce, 0xc0, 0x7c, 0x62, 0xbc, 0xea, + 0xd8, 0x65, 0xea, 0x53, 0xc7, 0x46, 0x37, 0x52, 0x75, 0x3f, 0xdb, 0x51, 0xf7, 0x85, 0x1e, 0x2e, + 0x49, 0xcd, 0xd1, 0x47, 0xf1, 0x79, 0x33, 0xc2, 0xfd, 0x4c, 0x3a, 0xf8, 0xdb, 0x66, 0x69, 0x36, + 0x76, 0x4b, 0x9f, 0x07, 0x35, 0x00, 0xd5, 0x4c, 0xe6, 0x3f, 0xf0, 0x4c, 0x9b, 0x85, 0xb0, 0xb4, + 0x4e, 0x64, 0xda, 0xff, 0x1f, 0xee, 0xa6, 0xb8, 0x87, 0xb1, 0x28, 0x43, 0xa2, 0xb5, 0x2e, 0x34, + 0xdc, 0x23, 0x02, 0xfa, 0x2f, 0x8c, 0x79, 0xc4, 0x64, 0x8e, 0x5d, 0xc8, 0x8a, 0x23, 0xc7, 0xf5, + 0xc2, 0x42, 0x8a, 0xa5, 0x16, 0xfd, 0x0f, 0xc6, 0xeb, 0x84, 0x31, 0xb3, 0x42, 0x0a, 0x39, 0x61, + 0x38, 0x2b, 0x0d, 0xc7, 0xd7, 0x43, 0x31, 0x8e, 0xf4, 0xea, 0xcf, 0x0a, 0xcc, 0x24, 0x75, 0x5a, + 0xa3, 0xcc, 0x47, 0x4f, 0xba, 0xba, 0x4f, 0x1b, 0x2e, 0x27, 0xee, 0x2d, 0x7a, 0x6f, 0x4e, 0x86, + 0x9b, 0x88, 0x24, 0x6d, 0x9d, 0xb7, 0x09, 0x39, 0xea, 0x93, 0x3a, 0xaf, 0xfa, 0xe8, 0xb9, 0xc9, + 0xcb, 0xe7, 0xdf, 0xa1, 0x4b, 0x8c, 0x69, 0x89, 0x9b, 0xbb, 0xcb, 0x11, 0x70, 0x08, 0xa4, 0xfe, + 0x3e, 0xda, 0x9e, 0x02, 0xef, 0x48, 0xf4, 0xa3, 0x02, 0x8b, 0xae, 0x47, 0x1d, 0x8f, 0xfa, 0xfb, + 0x6b, 0xa4, 0x41, 0x6a, 0xab, 0x8e, 0xbd, 0x4b, 0x2b, 0x81, 0x67, 0xf2, 0x5a, 0xca, 0xac, 0x6e, + 0x0d, 0x0c, 0xbd, 0xd9, 0x17, 0x02, 0x93, 0x5d, 0xe2, 0x11, 0xdb, 0x22, 0x86, 0x2a, 0xcf, 0xb4, + 0x78, 0x84, 0xf1, 0x11, 0x67, 0x41, 0xf7, 0x00, 0xd5, 0x4d, 0x9f, 0xd7, 0xb4, 0xb2, 0xe9, 0x11, + 0x8b, 0x94, 0x39, 0xaa, 0x68, 0xc9, 0x5c, 0xd2, 0x1f, 0xeb, 0x5d, 0x16, 0xb8, 0x87, 0x17, 0xfa, + 0x4e, 0x81, 0xf9, 0x72, 0xf7, 0xa2, 0x91, 0x9d, 0x79, 0x7d, 0xa8, 0x52, 0xf7, 0x58, 0x54, 0xc6, + 0x42, 0xab, 0x59, 0x9a, 0xef, 0xa1, 0xc0, 0xbd, 0xa2, 0xa1, 0x2f, 0x21, 0xe7, 0x05, 0x35, 0xc2, + 0x0a, 0x59, 0x71, 0xc3, 0x83, 0xc3, 0x6e, 0x3a, 0x35, 0x6a, 0xed, 0x63, 0xee, 0xf3, 0x39, 0xf5, + 0xab, 0xdb, 0x81, 0xd8, 0x58, 0x2c, 0xb9, 0x6e, 0xa1, 0xc2, 0x21, 0xaa, 0xfa, 0x02, 0xe6, 0x3a, + 0x17, 0x07, 0xaa, 0x02, 0x58, 0xd1, 0xac, 0xb2, 0x82, 0x22, 0xe2, 0x5e, 0x79, 0x87, 0xce, 0x8a, + 0x07, 0x3d, 0x59, 0x9b, 0xb1, 0x88, 0xe1, 0x36, 0x6c, 0xf5, 0x12, 0x4c, 0xdd, 0xf6, 0x9c, 0xc0, + 0x95, 0x87, 0x44, 0x4b, 0x90, 0xb5, 0xcd, 0x7a, 
0xb4, 0x82, 0xe2, 0xbd, 0xb8, 0x61, 0xd6, 0x09, + 0x16, 0x1a, 0xf5, 0x07, 0x05, 0xa6, 0xd7, 0x68, 0x9d, 0xfa, 0x98, 0x30, 0xd7, 0xb1, 0x19, 0x41, + 0x57, 0x53, 0x6b, 0xeb, 0x4c, 0xc7, 0xda, 0x3a, 0x91, 0x32, 0x6e, 0x5b, 0x58, 0x4f, 0x60, 0xfc, + 0xab, 0x80, 0x04, 0xd4, 0xae, 0xc8, 0xb5, 0x7d, 0x75, 0x60, 0x86, 0x5b, 0xa1, 0x7d, 0xaa, 0xe3, + 0x8c, 0x49, 0xbe, 0x08, 0xa4, 0x06, 0x47, 0x90, 0xea, 0x1f, 0x0a, 0x9c, 0x11, 0x91, 0x49, 0xb9, + 0x7f, 0x27, 0xa3, 0x27, 0x50, 0x30, 0x19, 0x0b, 0x3c, 0x52, 0x5e, 0x75, 0x6c, 0x2b, 0xf0, 0xf8, + 0x0c, 0xec, 0x6f, 0x57, 0x4d, 0x8f, 0x30, 0x91, 0x4e, 0xce, 0x58, 0x92, 0xe9, 0x14, 0x56, 0xfa, + 0xd8, 0xe1, 0xbe, 0x08, 0x68, 0x0f, 0xa6, 0x6b, 0xed, 0xc9, 0xcb, 0x3c, 0xb5, 0x81, 0x79, 0xa6, + 0x4a, 0x66, 0x9c, 0x92, 0x47, 0x48, 0x97, 0x1d, 0xa7, 0xb1, 0xd5, 0xe7, 0x70, 0x6a, 0x83, 0x0f, + 0x32, 0x73, 0x02, 0xcf, 0x22, 0x49, 0x0f, 0xa2, 0x12, 0xe4, 0x1a, 0xc4, 0xdb, 0x09, 0xfb, 0x28, + 0x6f, 0xe4, 0x79, 0x07, 0x3e, 0xe2, 0x02, 0x1c, 0xca, 0xd1, 0xc7, 0x30, 0x6b, 0x27, 0x9e, 0x0f, + 0xf1, 0x1a, 0x2b, 0x8c, 0x09, 0xd3, 0xf9, 0x56, 0xb3, 0x34, 0xbb, 0x91, 0x56, 0xe1, 0x4e, 0x5b, + 0xf5, 0x30, 0x03, 0x0b, 0x7d, 0x5a, 0x1e, 0x3d, 0x82, 0x09, 0x26, 0x7f, 0xcb, 0x36, 0x3e, 0x37, + 0x30, 0x79, 0xe9, 0x9c, 0x6c, 0xdd, 0x08, 0x0d, 0xc7, 0x58, 0xc8, 0x85, 0x69, 0x4f, 0x9e, 0x41, + 0x04, 0x95, 0xdb, 0xf7, 0x83, 0x81, 0xe0, 0xdd, 0xf5, 0x49, 0xca, 0x8b, 0xdb, 0x11, 0x71, 0x3a, + 0x00, 0x7a, 0x01, 0x73, 0x6d, 0x89, 0x87, 0x41, 0x47, 0x45, 0xd0, 0x6b, 0x03, 0x83, 0xf6, 0xbc, + 0x17, 0xa3, 0x20, 0xe3, 0xce, 0x6d, 0x74, 0xe0, 0xe2, 0xae, 0x48, 0xea, 0xaf, 0x19, 0x38, 0x62, + 0x21, 0xbf, 0x07, 0x82, 0x65, 0xa6, 0x08, 0xd6, 0xcd, 0x63, 0x3c, 0x35, 0x7d, 0x09, 0x17, 0xed, + 0x20, 0x5c, 0x2b, 0xc7, 0x09, 0x72, 0x34, 0x01, 0xfb, 0x33, 0x03, 0xff, 0xe9, 0xef, 0x9c, 0x10, + 0xb2, 0xfb, 0xa9, 0xcd, 0xf6, 0x61, 0xc7, 0x66, 0x3b, 0x3b, 0x04, 0xc4, 0xbf, 0x04, 0xad, 0x83, + 0xa0, 0xbd, 0x51, 0xa0, 0xd8, 0xbf, 0x6e, 0xef, 0x81, 0xb0, 0x3d, 0x4d, 0x13, 0xb6, 0x1b, 0xc7, + 0xe8, 0xb2, 0x3e, 0x04, 0xee, 0xf6, 0x51, 0xcd, 0x15, 0x33, 0xad, 0x21, 0x9e, 0xda, 0x83, 0x23, + 0x6b, 0x25, 0x98, 0xe1, 0x80, 0xbf, 0x0c, 0x29, 0xef, 0x4f, 0x6c, 0x73, 0xa7, 0x46, 0xea, 0xc4, + 0xf6, 0x65, 0x47, 0x52, 0x18, 0xaf, 0x85, 0x4f, 0xa4, 0x9c, 0x6b, 0x63, 0xb8, 0x97, 0xe9, 0xa8, + 0x27, 0x35, 0x7c, 0x8e, 0xa5, 0x19, 0x8e, 0xf0, 0xd5, 0x97, 0x0a, 0x2c, 0x0d, 0x1a, 0x57, 0xf4, + 0x75, 0x0f, 0xda, 0x73, 0x1c, 0x56, 0x3b, 0x3c, 0x0d, 0xfa, 0x49, 0x81, 0x93, 0xbd, 0xc8, 0x05, + 0x9f, 0x00, 0xce, 0x28, 0x62, 0x3a, 0x10, 0x4f, 0xc0, 0x96, 0x90, 0x62, 0xa9, 0x45, 0x17, 0x60, + 0xa2, 0x6a, 0xda, 0xe5, 0x6d, 0xfa, 0x4d, 0x44, 0x76, 0xe3, 0x1e, 0xbc, 0x23, 0xe5, 0x38, 0xb6, + 0x40, 0xb7, 0x60, 0x4e, 0xf8, 0xad, 0x11, 0xbb, 0xe2, 0x57, 0x45, 0xb1, 0xc4, 0x34, 0xe7, 0x92, + 0x47, 0x61, 0xab, 0x43, 0x8f, 0xbb, 0x3c, 0xd4, 0xbf, 0x14, 0x40, 0xff, 0xe4, 0xbd, 0x3f, 0x0f, + 0x79, 0xd3, 0xa5, 0x82, 0xf6, 0x85, 0x53, 0x90, 0x37, 0xa6, 0x5b, 0xcd, 0x52, 0x7e, 0x65, 0xf3, + 0x6e, 0x28, 0xc4, 0x89, 0x9e, 0x1b, 0x47, 0x0f, 0x61, 0xf8, 0xe0, 0x49, 0xe3, 0x28, 0x30, 0xc3, + 0x89, 0x1e, 0x5d, 0x87, 0x29, 0xab, 0x16, 0x30, 0x9f, 0x78, 0xdb, 0x96, 0xe3, 0x12, 0xb1, 0x35, + 0x26, 0x8c, 0x93, 0x32, 0xa7, 0xa9, 0xd5, 0x36, 0x1d, 0x4e, 0x59, 0x22, 0x0d, 0x80, 0xb7, 0x3c, + 0x73, 0x4d, 0x1e, 0x27, 0x27, 0xe2, 0xcc, 0xf0, 0x0b, 0xdb, 0x88, 0xa5, 0xb8, 0xcd, 0x42, 0x7d, + 0x06, 0xa7, 0xb6, 0x89, 0xd7, 0xa0, 0x16, 0x59, 0xb1, 0x2c, 0x27, 0xb0, 0xfd, 0x88, 0xc0, 0xea, + 0x90, 0x8f, 0xcd, 0xe4, 0x54, 0x9c, 0x90, 0xf1, 0xf3, 0x31, 0x16, 0x4e, 
0x6c, 0xe2, 0x31, 0xcc, + 0xf4, 0x1d, 0xc3, 0x5f, 0x32, 0x30, 0x9e, 0xc0, 0x67, 0xf7, 0xa8, 0x5d, 0x96, 0xc8, 0xa7, 0x23, + 0xeb, 0xfb, 0xd4, 0x2e, 0xbf, 0x6d, 0x96, 0x26, 0xa5, 0x19, 0xff, 0xc4, 0xc2, 0x10, 0xdd, 0x83, + 0x6c, 0xc0, 0x88, 0x27, 0x07, 0xec, 0xc2, 0xc0, 0x6e, 0x7e, 0xc8, 0x88, 0x17, 0x31, 0xa0, 0x09, + 0x0e, 0xcd, 0x05, 0x58, 0x60, 0xa0, 0x0d, 0xc8, 0x55, 0xf8, 0xad, 0xc8, 0xcd, 0x7f, 0x71, 0x20, + 0x58, 0x3b, 0xb5, 0x0f, 0x1b, 0x41, 0x48, 0x70, 0x08, 0x83, 0x3c, 0x98, 0x61, 0xa9, 0x22, 0x8a, + 0x0b, 0x1b, 0x86, 0xd1, 0xf4, 0xac, 0xbd, 0x81, 0x5a, 0xcd, 0xd2, 0x4c, 0x5a, 0x85, 0x3b, 0x22, + 0xa8, 0x3a, 0x4c, 0xb6, 0xa5, 0x38, 0x78, 0x09, 0x1a, 0xda, 0xc1, 0x61, 0x71, 0xe4, 0xd5, 0x61, + 0x71, 0xe4, 0xf5, 0x61, 0x71, 0xe4, 0xdb, 0x56, 0x51, 0x39, 0x68, 0x15, 0x95, 0x57, 0xad, 0xa2, + 0xf2, 0xba, 0x55, 0x54, 0xde, 0xb4, 0x8a, 0xca, 0xf7, 0xbf, 0x15, 0x47, 0x1e, 0x4f, 0x44, 0x47, + 0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xb3, 0x17, 0x48, 0x11, 0x14, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m 
*GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], 
m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, 
f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, 
"") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
+ if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto new file mode 100644 index 00000000000..6134b5e6999 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -0,0 +1,436 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.flowcontrol.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + // +listType=set + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be non-negative. + // Note that if the precedence is not specified or zero, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=set + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. 
+message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. 
This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=set + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=set + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=set + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + // +listType=set + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. 
+ optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. 
+ repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // Required + // +unionDiscriminator + optional string kind = 1; + + // +optional + optional UserSubject user = 2; + + // +optional + optional GroupSubject group = 3; + + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go new file mode 100644 index 00000000000..0c8a9cc5657 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go new file mode 100644 index 00000000000..41073bdcb14 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -0,0 +1,513 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. 
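// Illustrative sketch, not part of the vendored file: the conventional way the
// registration in register.go above is consumed. It assumes the
// k8s.io/apimachinery/pkg/runtime import shown there and is only a usage
// example of AddToScheme.
func newFlowcontrolScheme() (*runtime.Scheme, error) {
    // Start from an empty scheme and install the four kinds added by
    // addKnownTypes: FlowSchema(List) and PriorityLevelConfiguration(List).
    scheme := runtime.NewScheme()
    if err := AddToScheme(scheme); err != nil {
        return nil, err
    }
    return scheme, nil
}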
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + // +listType=set + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be non-negative. + // Note that if the precedence is not specified or zero, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=set + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. 
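// Illustrative sketch, not part of the vendored file: a FlowSchemaSpec is
// easier to read with concrete values. The names used here ("example-level",
// "example-controller") are hypothetical, and the sketch refers to Subject,
// ServiceAccountSubject and ResourcePolicyRule, which are declared further
// down in this file.
func exampleFlowSchema() FlowSchema {
    return FlowSchema{
        ObjectMeta: metav1.ObjectMeta{Name: "example-flow-schema"},
        Spec: FlowSchemaSpec{
            // Send matching requests to a hypothetical priority level.
            PriorityLevelConfiguration: PriorityLevelConfigurationReference{Name: "example-level"},
            // Lower numbers win when several FlowSchemas match a request.
            MatchingPrecedence: 1000,
            // Separate flows by the requesting username.
            DistinguisherMethod: &FlowDistinguisherMethod{Type: FlowDistinguisherMethodByUserType},
            Rules: []PolicyRulesWithSubjects{{
                Subjects: []Subject{{
                    Kind:           SubjectKindServiceAccount,
                    ServiceAccount: &ServiceAccountSubject{Namespace: "kube-system", Name: "example-controller"},
                }},
                ResourceRules: []ResourcePolicyRule{{
                    Verbs:      []string{VerbAll},
                    APIGroups:  []string{APIGroupAll},
                    Resources:  []string{ResourceAll},
                    Namespaces: []string{NamespaceEvery},
                }},
            }},
        },
    }
}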
An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=set + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=set + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=set + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. 
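// Illustrative sketch, not part of the vendored file: one plausible reading of
// the Subject union above, not the apiserver's implementation. It assumes a
// "strings" import, represents the authenticated caller with a minimal local
// struct, uses the GroupSubject and ServiceAccountSubject types declared just
// below, and relies on the standard
// "system:serviceaccount:<namespace>:<name>" username convention.
type requestUser struct {
    Name   string
    Groups []string
}

func subjectMatches(s Subject, u requestUser) bool {
    switch s.Kind {
    case SubjectKindUser:
        return s.User != nil && (s.User.Name == NameAll || s.User.Name == u.Name)
    case SubjectKindGroup:
        if s.Group == nil {
            return false
        }
        for _, g := range u.Groups {
            if s.Group.Name == NameAll || s.Group.Name == g {
                return true
            }
        }
        return false
    case SubjectKindServiceAccount:
        if s.ServiceAccount == nil {
            return false
        }
        prefix := "system:serviceaccount:" + s.ServiceAccount.Namespace + ":"
        if s.ServiceAccount.Name == NameAll {
            // "*" matches any service account in the given namespace.
            return strings.HasPrefix(u.Name, prefix)
        }
        return u.Name == prefix+s.ServiceAccount.Name
    default:
        return false
    }
}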
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. 
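// Illustrative sketch, not part of the vendored file: the (a)-(d) clauses of
// ResourcePolicyRule written out as a predicate. This is not the server's
// matcher; the resourceRequest struct is a simplified stand-in for the real
// request attributes.
type resourceRequest struct {
    Verb      string
    APIGroup  string
    Resource  string // e.g. "services" or "nodes/status"
    Namespace string // empty when the request does not specify a namespace
}

func containsOrWildcard(list []string, value string) bool {
    for _, v := range list {
        if v == "*" || v == value {
            return true
        }
    }
    return false
}

func resourceRuleMatches(r ResourcePolicyRule, req resourceRequest) bool {
    if !containsOrWildcard(r.Verbs, req.Verb) ||
        !containsOrWildcard(r.APIGroups, req.APIGroup) ||
        !containsOrWildcard(r.Resources, req.Resource) {
        return false
    }
    if req.Namespace == "" {
        // Namespace-less requests match only if clusterScope is set.
        return r.ClusterScope
    }
    return containsOrWildcard(r.Namespaces, req.Namespace)
}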
A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". 
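// Illustrative sketch, not part of the vendored file: the URL matching
// described for NonResourcePolicyRule, read literally from the field comments
// above; not the apiserver's implementation. Assumes a "strings" import.
func nonResourceRuleMatches(r NonResourcePolicyRule, verb, path string) bool {
    verbOK := false
    for _, v := range r.Verbs {
        if v == VerbAll || v == verb {
            verbOK = true
            break
        }
    }
    if !verbOK {
        return false
    }
    for _, u := range r.NonResourceURLs {
        switch {
        case u == NonResourceAll:
            // "*" matches every non-resource URL.
            return true
        case strings.HasSuffix(u, "/*"):
            // "/healthz/*" matches "/healthz/etcd" but not "/healthz" itself.
            if strings.HasPrefix(path, strings.TrimSuffix(u, "*")) {
                return true
            }
        case u == path:
            return true
        }
    }
    return false
}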
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + // +listType=set + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. 
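// Illustrative sketch, not part of the vendored file: the ACV formula in the
// comment above written out as plain arithmetic. The map from priority-level
// name to assuredConcurrencyShares is an assumed input.
//
//   ACV(l) = ceil( SCL * ACS(l) / sum over levels k of ACS(k) )
func assuredConcurrencyValues(serverConcurrencyLimit int32, sharesByLevel map[string]int32) map[string]int32 {
    var totalShares int32
    for _, acs := range sharesByLevel {
        totalShares += acs
    }
    acv := make(map[string]int32, len(sharesByLevel))
    if totalShares == 0 {
        return acv
    }
    for level, acs := range sharesByLevel {
        // Integer ceiling of SCL*ACS(l)/totalShares.
        acv[level] = (serverConcurrencyLimit*acs + totalShares - 1) / totalShares
    }
    return acv
}
// For example, with a server concurrency limit of 100 and shares {a: 30, b: 70},
// level a is assured ceil(100*30/100) = 30 executing requests and level b 70.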
+ // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. 
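// Illustrative sketch, not part of the vendored file: the `queues`/`handSize`
// shuffle-sharding idea described above, simplified; the apiserver's actual
// dealer differs. Assumes a "hash/fnv" import, that queueLengths[i] is the
// current length of queue i, and that handSize <= queues.
func chooseQueue(flowIdentifier string, queues, handSize int32, queueLengths []int) int32 {
    h := fnv.New64a()
    h.Write([]byte(flowIdentifier))
    hash := h.Sum64()

    // Deal a hand of handSize distinct queue indices from the deck 0..queues-1,
    // using successive remainders of the hash, and keep the shortest queue seen.
    deck := make([]int32, queues)
    for i := range deck {
        deck[i] = int32(i)
    }
    best := int32(-1)
    for i := int32(0); i < handSize; i++ {
        remaining := uint64(queues - i)
        pick := hash % remaining
        hash /= remaining
        card := deck[pick]
        deck[pick] = deck[remaining-1] // remove the dealt card from the deck
        if best < 0 || queueLengths[card] < queueLengths[best] {
            best = card
        }
    }
    return best
}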
+ // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000000..08380a1b1c0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... 
|| TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be non-negative. Note that if the precedence is not specified or zero, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. 
`nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. 
For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. 
If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "Required", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. 
Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..f5d37954b1d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,541 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. 
+func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. 
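For orientation, a minimal usage sketch of the generated deepcopy helpers in this hunk, assuming (as the type names suggest) that they sit in the k8s.io/api/flowcontrol/v1alpha1 package vendored here; it is illustrative only and not part of the vendored file. DeepCopy hands back an independent object, so a caller can mutate the copy without corrupting a shared original, for example one held in an informer cache.

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1alpha1"
)

func main() {
	orig := &flowcontrol.FlowSchema{}
	orig.Name = "service-accounts"

	// The generated DeepCopy allocates a new FlowSchema and deep-copies every
	// field, so this write does not touch orig.
	clone := orig.DeepCopy()
	clone.Spec.MatchingPrecedence = 900

	fmt.Println(orig.Spec.MatchingPrecedence, clone.Spec.MatchingPrecedence) // prints: 0 900
}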
+func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto index 7be0a7f7a40..9679dafc510 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -151,7 +151,7 @@ message PodDisruptionBudgetSpec { // PodDisruptionBudget. Status may trail the actual state of a system. message PodDisruptionBudgetStatus { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. + // status information is valid only if observedGeneration equals to PDB's object generation. // +optional optional int64 observedGeneration = 1; diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go index b3001bb5c0d..d8e417ab263 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go @@ -48,7 +48,7 @@ type PodDisruptionBudgetSpec struct { // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. + // status information is valid only if observedGeneration equals to PDB's object generation. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -276,7 +276,7 @@ var AllowAllCapabilities v1.Capability = "*" // FSType gives strong typing to different file systems that are used by volumes. type FSType string -var ( +const ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 0bbc48f9f8a..40a951c417c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -126,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { var map_PodDisruptionBudgetStatus = map[string]string{ "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. 
A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", "currentHealthy": "current number of healthy pods", diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index b16715bc492..5c50a87eed4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -37,6 +37,7 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional @@ -55,6 +56,7 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. // +optional @@ -69,7 +71,8 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -79,7 +82,8 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. message ClusterRoleList { // Standard object's metadata. // +optional @@ -117,6 +121,7 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional @@ -130,6 +135,7 @@ message Role { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. 
// +optional @@ -145,6 +151,7 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -154,7 +161,8 @@ message RoleBindingList { repeated RoleBinding items = 2; } -// RoleList is a collection of Roles +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. message RoleList { // Standard object's metadata. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types.go index 521cce4f31d..a5d3e38f657 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -103,6 +103,7 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -120,6 +121,7 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -138,6 +140,7 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -150,7 +153,8 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// RoleList is a collection of Roles +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -166,6 +170,7 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -197,6 +202,7 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. 
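For context, a minimal sketch (not part of the vendored diff) of what the deprecation notes added here point callers toward: constructing the rbac.authorization.k8s.io/v1 types instead of the v1alpha1 ones. The object name and rule contents below are placeholders.

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The same kind of object expressed with the non-deprecated v1 API group.
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader"}, // placeholder name
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""}, // "" selects the core API group
			Resources: []string{"pods"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}
	fmt.Println(role.Name, len(role.Rules))
}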
type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -214,7 +220,8 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -227,7 +234,8 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d7b194ae407..8238de21d69 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", + "": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", + "": "RoleList is a collection of Roles. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 07bf735a06b..87e0dbdfdd3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -37,6 +37,7 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional @@ -55,6 +56,7 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. // +optional @@ -69,7 +71,8 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -79,7 +82,8 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. message ClusterRoleList { // Standard object's metadata. // +optional @@ -117,6 +121,7 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional @@ -130,6 +135,7 @@ message Role { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. // +optional @@ -145,6 +151,7 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -155,6 +162,7 @@ message RoleBindingList { } // RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. message RoleList { // Standard object's metadata. 
// +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types.go index 35843c90d18..74c70936a43 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -102,6 +102,7 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -119,6 +120,7 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -137,6 +139,7 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -150,6 +153,7 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -165,6 +169,7 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -195,6 +200,7 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -212,7 +218,8 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
@@ -225,7 +232,8 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index c80327593d7..8e9d7ace795 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", + "": "ClusterRoleList is a collection of ClusterRoles. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", + "": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go index 782f50693f3..3d09ee7e449 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -46,10 +46,122 @@ var _ = math.Inf // proto package needs to be updated. 
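As background for the generated marshaling code added below for the new CSINode and VolumeNodeResources types, here is a small, self-contained sketch of the buffer-filling pattern it uses: MarshalToSizedBuffer works from the end of the buffer toward the start, placing each field's payload first, then its varint-encoded length, then the field tag byte in front of it. The helper names in this sketch (sovLen, encodeVarint, appendStringReverse) are hypothetical stand-ins, not the generated identifiers.

package main

import (
	"fmt"
	"math/bits"
)

// sovLen reports how many bytes the varint encoding of x needs
// (the same formula the generated sovGenerated helper relies on).
func sovLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes v so that it ends just before offset and
// returns the new, lower write position.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sovLen(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// appendStringReverse emits tag | varint(len) | payload for one
// length-delimited field, writing right to left, and returns the new position.
func appendStringReverse(buf []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(buf[i:], s)
	i = encodeVarint(buf, i, uint64(len(s)))
	i--
	buf[i] = tag
	return i
}

func main() {
	buf := make([]byte, 32)
	i := len(buf)
	// Field 1, wire type 2 (length-delimited) => tag byte 0x0a, the same tag
	// the generated code writes for CSINodeDriver.Name.
	i = appendStringReverse(buf, i, 0x0a, "csi.example.com")
	fmt.Printf("% x\n", buf[i:])
}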
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{0} + return fileDescriptor_3b530c1983504d8d, []int{4} } func (m *StorageClass) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +189,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{1} + return fileDescriptor_3b530c1983504d8d, []int{5} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +217,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{2} + return fileDescriptor_3b530c1983504d8d, []int{6} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +245,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{3} + return fileDescriptor_3b530c1983504d8d, []int{7} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +273,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{4} + return fileDescriptor_3b530c1983504d8d, []int{8} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +301,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{5} + return fileDescriptor_3b530c1983504d8d, []int{9} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +329,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{6} + return fileDescriptor_3b530c1983504d8d, []int{10} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +357,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{7} + return fileDescriptor_3b530c1983504d8d, []int{11} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +382,39 @@ func (m *VolumeError) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeError proto.InternalMessageInfo +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{12} +} +func (m *VolumeNodeResources) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo + func init() { + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") @@ -281,6 +425,7 @@ func init() { proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1.VolumeNodeResources") } func init() { @@ -288,71 +433,264 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1018 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, - 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, - 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, - 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 0xa4, - 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, - 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, - 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, - 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, - 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, - 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, - 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, - 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, - 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, - 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, - 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, - 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, - 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, - 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 
0x54, 0x47, 0x26, - 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, - 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, - 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, - 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, - 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, - 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, - 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, - 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, - 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, - 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, - 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, - 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, - 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, - 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 0x54, 0xeb, 0xd4, - 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, - 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 0x5c, 0x45, 0xa5, - 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, - 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, - 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, - 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, - 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, - 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, - 0x04, 0x3c, 0xdd, 0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, - 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, - 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, - 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, - 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 0x64, 0xe6, - 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, - 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, - 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, - 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, - 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, - 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, - 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, - 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, - 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, - 0xe0, 
0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, - 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, - 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, - 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, - 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, - 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, - 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, - 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, - 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, + // 1212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0xae, 0x9b, 0xa4, 0x4d, 0x27, 0x2d, 0x9b, 0xce, 0x16, 0x08, 0x39, 0x24, 0x95, 0x41, 0x10, + 0x0a, 0xeb, 0x6c, 0x97, 0x65, 0xb5, 0x42, 0x02, 0x29, 0x6e, 0x23, 0x51, 0xd1, 0xb4, 0xd5, 0xb4, + 0xac, 0x10, 0x02, 0xc4, 0xd4, 0x1e, 0x52, 0x6f, 0x62, 0x8f, 0xf1, 0x4c, 0x02, 0xb9, 0x71, 0xe2, + 0x86, 0x04, 0x57, 0x7e, 0x05, 0x5c, 0x39, 0x72, 0x2a, 0xb7, 0x15, 0xa7, 0x3d, 0x45, 0xd4, 0x9c, + 0xe1, 0x07, 0xf4, 0x84, 0x66, 0x3c, 0x8d, 0x9d, 0xc4, 0x29, 0xe9, 0xa5, 0xb7, 0xcc, 0x9b, 0xf7, + 0x7d, 0xef, 0xbd, 0xf9, 0xde, 0xbc, 0x71, 0xc0, 0x07, 0x9d, 0xc7, 0xcc, 0x70, 0x68, 0xbd, 0xd3, + 0x3b, 0x25, 0x81, 0x47, 0x38, 0x61, 0xf5, 0x3e, 0xf1, 0x6c, 0x1a, 0xd4, 0xd5, 0x06, 0xf6, 0x9d, + 0x3a, 0xe3, 0x34, 0xc0, 0x6d, 0x52, 0xef, 0x6f, 0xd7, 0xdb, 0xc4, 0x23, 0x01, 0xe6, 0xc4, 0x36, + 0xfc, 0x80, 0x72, 0x0a, 0x5f, 0x8c, 0xdc, 0x0c, 0xec, 0x3b, 0x86, 0x72, 0x33, 0xfa, 0xdb, 0xe5, + 0x7b, 0x6d, 0x87, 0x9f, 0xf5, 0x4e, 0x0d, 0x8b, 0xba, 0xf5, 0x36, 0x6d, 0xd3, 0xba, 0xf4, 0x3e, + 0xed, 0x7d, 0x25, 0x57, 0x72, 0x21, 0x7f, 0x45, 0x2c, 0x65, 0x3d, 0x11, 0xcc, 0xa2, 0x41, 0x5a, + 0xa4, 0xf2, 0xc3, 0xd8, 0xc7, 0xc5, 0xd6, 0x99, 0xe3, 0x91, 0x60, 0x50, 0xf7, 0x3b, 0x6d, 0x61, + 0x60, 0x75, 0x97, 0x70, 0x9c, 0x86, 0xaa, 0xcf, 0x42, 0x05, 0x3d, 0x8f, 0x3b, 0x2e, 0x99, 0x02, + 0x3c, 0xfa, 0x3f, 0x00, 0xb3, 0xce, 0x88, 0x8b, 0x27, 0x71, 0xfa, 0xaf, 0x1a, 0x58, 0xde, 0x39, + 0xde, 0x3b, 0xa0, 0x36, 0x81, 0x5f, 0x82, 0xbc, 0xc8, 0xc7, 0xc6, 0x1c, 0x97, 0xb4, 0x4d, 0xad, + 0x56, 0x78, 0x70, 0xdf, 0x88, 0xcf, 0x69, 0x44, 0x6b, 0xf8, 0x9d, 0xb6, 0x30, 0x30, 0x43, 0x78, + 0x1b, 0xfd, 0x6d, 0xe3, 0xf0, 0xf4, 0x29, 0xb1, 0x78, 0x8b, 0x70, 0x6c, 0xc2, 0xf3, 0x61, 0x75, + 0x21, 0x1c, 0x56, 0x41, 0x6c, 0x43, 0x23, 0x56, 0xb8, 0x0b, 0xb2, 0xcc, 0x27, 0x56, 0x69, 0x51, + 0xb2, 0xeb, 0x46, 0xaa, 0x0a, 0x86, 0xca, 0xe7, 0xd8, 0x27, 0x96, 0xb9, 0xaa, 0xf8, 0xb2, 0x62, + 0x85, 0x24, 0x5a, 0xff, 0x57, 0x03, 0x6b, 0xca, 0x67, 0x37, 0x70, 0xfa, 0x24, 0x80, 0x9b, 0x20, + 0xeb, 0x61, 0x97, 0xc8, 0xac, 0x57, 0x62, 0xcc, 0x01, 0x76, 0x09, 0x92, 0x3b, 0xf0, 0x75, 0xb0, + 0xe4, 0x51, 0x9b, 0xec, 0xed, 0xca, 0xd8, 0x2b, 0xe6, 0x0b, 0xca, 0x67, 0xe9, 0x40, 0x5a, 0x91, + 0xda, 0x85, 0x0f, 0xc1, 0x2a, 0xa7, 0x3e, 0xed, 0xd2, 0xf6, 0xe0, 0x23, 0x32, 0x60, 0xa5, 0xcc, + 0x66, 0xa6, 0xb6, 0x62, 0x16, 0xc3, 0x61, 0x75, 0xf5, 0x24, 0x61, 0x47, 0x63, 0x5e, 0xf0, 0x73, + 0x50, 0xc0, 0xdd, 0x2e, 0xb5, 0x30, 0xc7, 0xa7, 0x5d, 0x52, 0xca, 0xca, 0xf2, 0xb6, 0x66, 0x94, + 0xf7, 0x84, 0x76, 
0x7b, 0x2e, 0x11, 0x71, 0x11, 0x61, 0xb4, 0x17, 0x58, 0x84, 0x99, 0x77, 0xc2, + 0x61, 0xb5, 0xd0, 0x88, 0x29, 0x50, 0x92, 0x4f, 0xff, 0x45, 0x03, 0x05, 0x55, 0xf0, 0xbe, 0xc3, + 0x38, 0xfc, 0x6c, 0x4a, 0x28, 0x63, 0x3e, 0xa1, 0x04, 0x5a, 0xca, 0x54, 0x54, 0xe5, 0xe7, 0xaf, + 0x2c, 0x09, 0x91, 0x76, 0x40, 0xce, 0xe1, 0xc4, 0x65, 0xa5, 0xc5, 0xcd, 0x4c, 0xad, 0xf0, 0xa0, + 0x72, 0xbd, 0x4a, 0xe6, 0x9a, 0xa2, 0xca, 0xed, 0x09, 0x10, 0x8a, 0xb0, 0xfa, 0x17, 0xa3, 0x8c, + 0x85, 0x70, 0xf0, 0x10, 0x2c, 0xdb, 0x52, 0x2a, 0x56, 0xd2, 0x24, 0xeb, 0x6b, 0xd7, 0xb3, 0x46, + 0xba, 0x9a, 0x77, 0x14, 0xf7, 0x72, 0xb4, 0x66, 0xe8, 0x8a, 0x45, 0xff, 0x61, 0x09, 0xac, 0x1e, + 0x47, 0xb0, 0x9d, 0x2e, 0x66, 0xec, 0x16, 0x9a, 0xf7, 0x5d, 0x50, 0xf0, 0x03, 0xda, 0x77, 0x98, + 0x43, 0x3d, 0x12, 0xa8, 0x3e, 0xba, 0xab, 0x20, 0x85, 0xa3, 0x78, 0x0b, 0x25, 0xfd, 0x60, 0x1b, + 0x00, 0x1f, 0x07, 0xd8, 0x25, 0x5c, 0x54, 0x9f, 0x91, 0xd5, 0xbf, 0x33, 0xa3, 0xfa, 0x64, 0x45, + 0xc6, 0xd1, 0x08, 0xd5, 0xf4, 0x78, 0x30, 0x88, 0xb3, 0x8b, 0x37, 0x50, 0x82, 0x1a, 0x76, 0xc0, + 0x5a, 0x40, 0xac, 0x2e, 0x76, 0xdc, 0x23, 0xda, 0x75, 0xac, 0x81, 0x6c, 0xc3, 0x15, 0xb3, 0x19, + 0x0e, 0xab, 0x6b, 0x28, 0xb9, 0x71, 0x39, 0xac, 0xde, 0x9f, 0x9e, 0x5c, 0xc6, 0x11, 0x09, 0x98, + 0xc3, 0x38, 0xf1, 0x78, 0xd4, 0xa1, 0x63, 0x18, 0x34, 0xce, 0x2d, 0xee, 0x89, 0x4b, 0x7b, 0x1e, + 0x3f, 0xf4, 0xb9, 0x43, 0x3d, 0x56, 0xca, 0xc5, 0xf7, 0xa4, 0x95, 0xb0, 0xa3, 0x31, 0x2f, 0xb8, + 0x0f, 0x36, 0x44, 0x5f, 0x7f, 0x13, 0x05, 0x68, 0x7e, 0xeb, 0x63, 0x4f, 0x9c, 0x52, 0x69, 0x69, + 0x53, 0xab, 0xe5, 0xcd, 0x52, 0x38, 0xac, 0x6e, 0x34, 0x52, 0xf6, 0x51, 0x2a, 0x0a, 0x7e, 0x02, + 0xd6, 0xfb, 0xd2, 0x64, 0x3a, 0x9e, 0xed, 0x78, 0xed, 0x16, 0xb5, 0x49, 0x69, 0x59, 0x16, 0xbd, + 0x15, 0x0e, 0xab, 0xeb, 0x4f, 0x26, 0x37, 0x2f, 0xd3, 0x8c, 0x68, 0x9a, 0x04, 0x7e, 0x0d, 0xd6, + 0x65, 0x44, 0x62, 0xab, 0x4b, 0xef, 0x10, 0x56, 0xca, 0x4b, 0xe9, 0x6a, 0x49, 0xe9, 0xc4, 0xd1, + 0x09, 0xdd, 0xae, 0x46, 0xc3, 0x31, 0xe9, 0x12, 0x8b, 0xd3, 0xe0, 0x84, 0x04, 0xae, 0xf9, 0x8a, + 0xd2, 0x6b, 0xbd, 0x31, 0x49, 0x85, 0xa6, 0xd9, 0xcb, 0xef, 0x83, 0x3b, 0x13, 0x82, 0xc3, 0x22, + 0xc8, 0x74, 0xc8, 0x20, 0x1a, 0x6a, 0x48, 0xfc, 0x84, 0x1b, 0x20, 0xd7, 0xc7, 0xdd, 0x1e, 0x89, + 0x9a, 0x0f, 0x45, 0x8b, 0xf7, 0x16, 0x1f, 0x6b, 0xfa, 0x6f, 0x1a, 0x28, 0x26, 0xbb, 0xe7, 0x16, + 0xe6, 0xc4, 0x87, 0xe3, 0x73, 0xe2, 0xd5, 0x39, 0x7a, 0x7a, 0xc6, 0xb0, 0xf8, 0x79, 0x11, 0x14, + 0x23, 0x5d, 0x1a, 0x9c, 0x63, 0xeb, 0xcc, 0x25, 0x1e, 0xbf, 0x85, 0x0b, 0xdd, 0x1a, 0x7b, 0x8d, + 0xde, 0xba, 0x76, 0x5c, 0xc7, 0x89, 0xcd, 0x7a, 0x96, 0xe0, 0xc7, 0x60, 0x89, 0x71, 0xcc, 0x7b, + 0xe2, 0x92, 0x0b, 0xc2, 0x7b, 0xf3, 0x12, 0x4a, 0x50, 0xfc, 0x22, 0x45, 0x6b, 0xa4, 0xc8, 0xf4, + 0xdf, 0x35, 0xb0, 0x31, 0x09, 0xb9, 0x05, 0x75, 0xf7, 0xc7, 0xd5, 0x7d, 0x63, 0xce, 0x62, 0x66, + 0x28, 0xfc, 0xa7, 0x06, 0x5e, 0x9a, 0xaa, 0x5b, 0xbe, 0x7d, 0x62, 0x26, 0xf8, 0x13, 0x93, 0xe7, + 0x20, 0x7e, 0xcb, 0xe5, 0x4c, 0x38, 0x4a, 0xd9, 0x47, 0xa9, 0x28, 0xf8, 0x14, 0x14, 0x1d, 0xaf, + 0xeb, 0x78, 0x24, 0xb2, 0x1d, 0xc7, 0xfa, 0xa6, 0x5e, 0xdc, 0x49, 0x66, 0x29, 0xee, 0x46, 0x38, + 0xac, 0x16, 0xf7, 0x26, 0x58, 0xd0, 0x14, 0xaf, 0xfe, 0x47, 0x8a, 0x32, 0xf2, 0xb5, 0x7b, 0x1b, + 0xe4, 0xb1, 0xb4, 0x90, 0x40, 0x95, 0x31, 0x3a, 0xe9, 0x86, 0xb2, 0xa3, 0x91, 0x87, 0xec, 0x1b, + 0x79, 0x14, 0x2a, 0xd1, 0xb9, 0xfb, 0x46, 0x82, 0x12, 0x7d, 0x23, 0xd7, 0x48, 0x91, 0x89, 0x24, + 0xc4, 0x37, 0x8d, 0x3c, 0xcb, 0xcc, 0x78, 0x12, 0x07, 0xca, 0x8e, 0x46, 0x1e, 0xfa, 0x3f, 0x99, + 0x14, 0x81, 0x64, 0x03, 0x26, 0xaa, 0xb1, 
0x65, 0x35, 0xf9, 0xa9, 0x6a, 0xec, 0x51, 0x35, 0x36, + 0xfc, 0x49, 0x03, 0x10, 0x8f, 0x28, 0x5a, 0x57, 0x0d, 0x1a, 0x75, 0x51, 0xf3, 0x46, 0x57, 0xc2, + 0x68, 0x4c, 0xf1, 0x44, 0x2f, 0x61, 0x59, 0xc5, 0x87, 0xd3, 0x0e, 0x28, 0x25, 0x38, 0xb4, 0x41, + 0x21, 0xb2, 0x36, 0x83, 0x80, 0x06, 0xea, 0x7a, 0xea, 0xd7, 0xe6, 0x22, 0x3d, 0xcd, 0x8a, 0xfc, + 0x2c, 0x8b, 0xa1, 0x97, 0xc3, 0x6a, 0x21, 0xb1, 0x8f, 0x92, 0xb4, 0x22, 0x8a, 0x4d, 0xe2, 0x28, + 0xd9, 0x9b, 0x45, 0xd9, 0x25, 0xb3, 0xa3, 0x24, 0x68, 0xcb, 0x4d, 0xf0, 0xf2, 0x8c, 0x63, 0xb9, + 0xd1, 0x7b, 0xf1, 0xbd, 0x06, 0x92, 0x31, 0xe0, 0x3e, 0xc8, 0x8a, 0xbf, 0x09, 0x6a, 0x90, 0x6c, + 0xcd, 0x37, 0x48, 0x4e, 0x1c, 0x97, 0xc4, 0xa3, 0x50, 0xac, 0x90, 0x64, 0x81, 0x6f, 0x82, 0x65, + 0x97, 0x30, 0x86, 0xdb, 0x2a, 0x72, 0xfc, 0x21, 0xd7, 0x8a, 0xcc, 0xe8, 0x6a, 0x5f, 0x7f, 0x04, + 0xee, 0xa6, 0x7c, 0x10, 0xc3, 0x2a, 0xc8, 0x59, 0xe2, 0xcb, 0x41, 0x26, 0x94, 0x33, 0x57, 0xc4, + 0x44, 0xd9, 0x11, 0x06, 0x14, 0xd9, 0xcd, 0xda, 0xf9, 0x45, 0x65, 0xe1, 0xd9, 0x45, 0x65, 0xe1, + 0xf9, 0x45, 0x65, 0xe1, 0xbb, 0xb0, 0xa2, 0x9d, 0x87, 0x15, 0xed, 0x59, 0x58, 0xd1, 0x9e, 0x87, + 0x15, 0xed, 0xaf, 0xb0, 0xa2, 0xfd, 0xf8, 0x77, 0x65, 0xe1, 0xd3, 0xc5, 0xfe, 0xf6, 0x7f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x5c, 0x59, 0x23, 0xb9, 0x2c, 0x0e, 0x00, 0x00, +} + +func (m *CSINode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drivers) > 0 { + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *StorageClass) Marshal() (dAtA []byte, err error) { @@ -813,17 +1151,113 @@ func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) return base } +func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *StorageClass) Size() (n int) { if m == nil { return 0 @@ -988,12 +1422,79 @@ func (m *VolumeError) Size() (n int) { return n } +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + repeatedStringForDrivers + `,`, + `}`, + }, "") + return s +} func (this *StorageClass) String() string { if this == nil { return "nil" @@ -1101,39 +1602,560 @@ func (this *VolumeAttachmentStatus) String() string { for k := range this.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + 
mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s + return nil } -func 
(this *VolumeError) String() string { - if this == nil { - return "nil" +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2587,6 +3609,79 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto index 4087b7e5d3b..e5004c84243 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto @@ -29,6 +29,80 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. 
+ // +optional + repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + optional VolumeNodeResources allocatable = 4; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. // @@ -193,3 +267,13 @@ message VolumeError { optional string message = 2; } +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go index 473c687278b..67493fd0fab 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go @@ -49,6 +49,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachment{}, &VolumeAttachmentList{}, + + &CSINode{}, + &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go index e42ee5e9c59..86cb78b6401 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -216,3 +216,97 @@ type VolumeError struct { // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. 
+// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. 
+ // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 78ff19c2112..d6e3a16293d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -27,6 +27,47 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSINode = map[string]string{ + "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. 
This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -116,4 +157,13 @@ func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index eb8626e6e01..76255a0af74 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -25,6 +25,115 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. +func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -271,3 +380,24 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. 
+func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto index bb2cf3450b3..373a154b112 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -121,6 +121,8 @@ message CSIDriverSpec { repeated string volumeLifecycleModes = 3; } +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as // they use the node-driver-registrar sidecar container, the kubelet will diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go index fa1bae1d8d5..a8faeb9d130 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go @@ -348,6 +348,8 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as // they use the node-driver-registrar sidecar container, the kubelet will diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index fe80a8e5074..53fa666ba0a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -59,7 +59,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { } var map_CSINode = map[string]string{ - "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "": "DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. 
CSINode has an OwnerReference that points to the corresponding node object.", "metadata": "metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } diff --git a/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index 178e91eedc2..dd5a3a48a03 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -62,8 +62,12 @@ const ( // owner: @sttts // alpha: v1.15 + // beta: v1.16 + // GA: v1.17 // // CustomResourceDefaulting enables OpenAPI defaulting in CustomResources. + // + // TODO: remove in 1.18 CustomResourceDefaulting featuregate.Feature = "CustomResourceDefaulting" ) @@ -79,5 +83,5 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CustomResourceSubresources: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, CustomResourceWebhookConversion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, CustomResourcePublishOpenAPI: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - CustomResourceDefaulting: {Default: true, PreRelease: featuregate.Beta}, + CustomResourceDefaulting: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 63434030ca1..435297a8d51 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -23,4 +23,3 @@ reviewers: - krousey - cjcullen - david-mcmahon -- goltermann diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 95d5c7a3553..e53c3e61fd1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -70,6 +70,28 @@ func (e *StatusError) DebugError() (string, []interface{}) { return "server response object: %#v", []interface{}{e.ErrStatus} } +// HasStatusCause returns true if the provided error has a details cause +// with the provided type name. +func HasStatusCause(err error, name metav1.CauseType) bool { + _, ok := StatusCause(err, name) + return ok +} + +// StatusCause returns the named cause from the provided error if it exists and +// the error is of the type APIStatus. Otherwise it returns false. +func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { + apierr, ok := err.(APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return metav1.StatusCause{}, false + } + for _, cause := range apierr.Status().Details.Causes { + if cause.Type == name { + return cause, true + } + } + return metav1.StatusCause{}, false +} + // UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. type UnexpectedObjectError struct { Object runtime.Object @@ -201,6 +223,7 @@ func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError } // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +// DEPRECATED: Please use NewResourceExpired instead. 
func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index dd2c0cb6142..96bccff1b23 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -17,11 +17,7 @@ reviewers: - eparis - dims - krousey -- markturansky -- fabioy - resouer - david-mcmahon - mfojtik - jianhuiz -- feihujiang -- ghodss diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index 8454be55ed0..dc7740190ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -11,8 +11,6 @@ reviewers: - janetkuo - tallclair - eparis -- jbeda - xiang90 - mbohlool - david-mcmahon -- goltermann diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD index 138da23de0b..0fa9122633e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD @@ -1,25 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "register_test.go", - "roundtrip_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - ], -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -40,7 +21,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", ], ) @@ -53,6 +33,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme:all-srcs", + ], tags = ["automanaged"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 3fea2c380e5..b56140de5fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -21,15 +21,11 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" ) // GroupName is the group name for this API. const GroupName = "meta.k8s.io" -// Scheme is the registry for any type that adheres to the meta API spec. 
-var scheme = runtime.NewScheme() - var ( // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. @@ -38,22 +34,16 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Codecs provides access to encoding and decoding for the scheme. -var Codecs = serializer.NewCodecFactory(scheme) - // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(scheme) - // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // addToGroupVersion registers common meta types into schemas. -func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error { +func addToGroupVersion(scheme *runtime.Scheme) error { if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { return err } @@ -104,7 +94,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) // Unlike other API groups, meta internal knows about all meta external versions, but keeps // the logic for conversion private. func init() { - if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil { - panic(err) - } + localSchemeBuilder.Register(addToGroupVersion) + localSchemeBuilder.Register(metav1.RegisterConversions) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/BUILD new file mode 100644 index 00000000000..53e7cd79c90 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme", + importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "register_test.go", + "roundtrip_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go similarity index 73% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go rename to cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go index 51f3adfc712..a45fa2a8a55 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go @@ -14,13 +14,4 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() -} +package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go new file mode 100644 index 00000000000..472a9aeb236 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme. +var Codecs = serializer.NewCodecFactory(scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. 
+func init() { + utilruntime.Must(internalversion.AddToScheme(scheme)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 119e5d166ec..970e1c88f0c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -24,6 +24,7 @@ go_test( deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -54,6 +55,7 @@ go_library( "types.go", "types_swagger_doc_generated.go", "watch.go", + "zz_generated.conversion.go", "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 44929b1c002..77cfb0c1aa3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -30,4 +30,3 @@ reviewers: - mqliang - kevin-wangzefeng - jianhuiz -- feihujiang diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 042cd5b9c55..15b45ffa84b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -22,7 +22,7 @@ import ( // IsControlledBy checks if the object has a controllerRef set to the given owner func IsControlledBy(obj Object, owner Object) bool { - ref := GetControllerOf(obj) + ref := GetControllerOfNoCopy(obj) if ref == nil { return false } @@ -31,9 +31,20 @@ func IsControlledBy(obj Object, owner Object) bool { // GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller func GetControllerOf(controllee Object) *OwnerReference { - for _, ref := range controllee.GetOwnerReferences() { - if ref.Controller != nil && *ref.Controller { - return &ref + ref := GetControllerOfNoCopy(controllee) + if ref == nil { + return nil + } + cp := *ref + return &cp +} + +// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +func GetControllerOfNoCopy(controllee Object) *OwnerReference { + refs := controllee.GetOwnerReferences() + for i := range refs { + if refs[i].Controller != nil && *refs[i].Controller { + return &refs[i] } } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index d07069ef245..285a41a4228 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -18,6 +18,7 @@ package v1 import ( "fmt" + "net/url" "strconv" "strings" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" 
) @@ -35,12 +37,17 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_v1_ListMeta_To_v1_ListMeta, + Convert_v1_DeleteOptions_To_v1_DeleteOptions, + Convert_intstr_IntOrString_To_intstr_IntOrString, + Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString, + Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString, Convert_Pointer_v1_Duration_To_v1_Duration, Convert_v1_Duration_To_Pointer_v1_Duration, Convert_Slice_string_To_v1_Time, + Convert_Slice_string_To_Pointer_v1_Time, Convert_v1_Time_To_v1_Time, Convert_v1_MicroTime_To_v1_MicroTime, @@ -76,7 +83,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Slice_string_To_Slice_int32, - Convert_Slice_string_To_v1_DeletionPropagation, + Convert_Slice_string_To_Pointer_v1_DeletionPropagation, Convert_Slice_string_To_v1_IncludeObjectPolicy, ) @@ -194,12 +201,33 @@ func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) e return nil } +// +k8s:conversion-fn=copy-only +func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error { + *out = *in + return nil +} + // +k8s:conversion-fn=copy-only func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { *out = *in return nil } +func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error { + if *in == nil { + *out = intstr.IntOrString{} // zero value + return nil + } + *out = **in // copy + return nil +} + +func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error { + temp := *in // copy + *out = &temp + return nil +} + // +k8s:conversion-fn=copy-only func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { // Cannot deep copy these, because time.Time has unexported fields. 
@@ -230,14 +258,30 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s } // Convert_Slice_string_To_v1_Time allows converting a URL query parameter value -func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error { +func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error { str := "" - if len(*input) > 0 { - str = (*input)[0] + if len(*in) > 0 { + str = (*in)[0] } return out.UnmarshalQueryParameter(str) } +func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error { + if in == nil { + return nil + } + str := "" + if len(*in) > 0 { + str = (*in)[0] + } + temp := Time{} + if err := temp.UnmarshalQueryParameter(str); err != nil { + return err + } + *out = &temp + return nil +} + func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { selector, err := labels.Parse(*in) if err != nil { @@ -310,20 +354,53 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio return nil } -// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy -func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { - if len(*input) > 0 { - *out = DeletionPropagation((*input)[0]) +// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error { + var str string + if len(*in) > 0 { + str = (*in)[0] } else { - *out = "" + str = "" } + temp := DeletionPropagation(str) + *out = &temp return nil } // Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value -func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { - if len(*input) > 0 { - *out = IncludeObjectPolicy((*input)[0]) +func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions. +func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil { + return err + } + + uid := types.UID("") + if values, ok := (*in)["uid"]; ok && len(values) > 0 { + uid = types.UID(values[0]) + } + + resourceVersion := "" + if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 { + resourceVersion = values[0] + } + + if len(uid) > 0 || len(resourceVersion) > 0 { + if out.Preconditions == nil { + out.Preconditions = &Preconditions{} + } + if len(uid) > 0 { + out.Preconditions.UID = &uid + } + if len(resourceVersion) > 0 { + out.Preconditions.ResourceVersion = &resourceVersion + } } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index dbaa87c879f..7736753d668 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// +k8s:conversion-gen=false // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2025e039812..ba1194dcc56 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -163,6 +163,7 @@ message DeleteOptions { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional optional Preconditions preconditions = 2; @@ -416,9 +417,6 @@ message ListOptions { // If this is not a watch, this field is ignored. // If the feature gate WatchBookmarks is not enabled in apiserver, // this field is ignored. - // - // This field is beta. - // // +optional optional bool allowWatchBookmarks = 9; @@ -663,6 +661,15 @@ message ObjectMeta { // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge repeated string finalizers = 14; @@ -681,8 +688,6 @@ message ObjectMeta { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional repeated ManagedFieldsEntry managedFields = 17; } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 368efe1efd9..a7b8aa34f9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -25,6 +25,13 @@ import ( // GroupName is the group name for this API. const GroupName = "meta.k8s.io" +var ( + // localSchemeBuilder is used to make compiler happy for autogenerated + // conversions. However, it's not used. + schemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &schemeBuilder +) + // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} @@ -40,6 +47,31 @@ func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. 
+var ParameterCodec = runtime.NewParameterCodec(scheme) + +func addEventConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_WatchEvent_To_watch_Event, + Convert_v1_InternalEvent_To_v1_WatchEvent, + Convert_watch_Event_To_v1_WatchEvent, + Convert_v1_WatchEvent_To_v1_InternalEvent, + ) +} + +var optionsTypes = []runtime.Object{ + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + &PatchOptions{}, +} + // AddToGroupVersion registers common meta types into schemas. func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) @@ -48,21 +80,7 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &InternalEvent{}, ) // Supports legacy code paths, most callers should use metav1.ParameterCodec for now - scheme.AddKnownTypes(groupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - &PatchOptions{}, - ) - utilruntime.Must(scheme.AddConversionFuncs( - Convert_v1_WatchEvent_To_watch_Event, - Convert_v1_InternalEvent_To_v1_WatchEvent, - Convert_watch_Event_To_v1_WatchEvent, - Convert_v1_WatchEvent_To_v1_InternalEvent, - )) + scheme.AddKnownTypes(groupVersion, optionsTypes...) // Register Unversioned types under their own special group scheme.AddUnversionedTypes(Unversioned, &Status{}, @@ -72,36 +90,14 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &APIResourceList{}, ) - // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - utilruntime.Must(AddConversionFuncs(scheme)) - utilruntime.Must(RegisterDefaults(scheme)) -} - -// scheme is the registry for the common types that adhere to the meta v1 API spec. -var scheme = runtime.NewScheme() - -// ParameterCodec knows about query parameters used with the meta v1 API spec. -var ParameterCodec = runtime.NewParameterCodec(scheme) - -func init() { - scheme.AddUnversionedTypes(SchemeGroupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - &PatchOptions{}, - ) - - if err := AddMetaToScheme(scheme); err != nil { - panic(err) - } + utilruntime.Must(addEventConversionFuncs(scheme)) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(AddConversionFuncs(scheme)) utilruntime.Must(RegisterDefaults(scheme)) } +// AddMetaToScheme registers base meta types into schemas. func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, @@ -114,3 +110,12 @@ func AddMetaToScheme(scheme *runtime.Scheme) error { Convert_Slice_string_To_v1_IncludeObjectPolicy, ) } + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) + + utilruntime.Must(AddMetaToScheme(scheme)) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. 
+ utilruntime.Must(RegisterDefaults(scheme)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index a6ecf33123c..bf125b62a73 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -250,6 +250,15 @@ type ObjectMeta struct { // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` @@ -268,8 +277,6 @@ type ObjectMeta struct { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } @@ -315,6 +322,7 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ListOptions is the query options to a standard REST list call. @@ -344,9 +352,6 @@ type ListOptions struct { // If this is not a watch, this field is ignored. // If the feature gate WatchBookmarks is not enabled in apiserver, // this field is ignored. - // - // This field is beta. - // // +optional AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` @@ -397,6 +402,7 @@ type ListOptions struct { Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExportOptions is the query options to the standard REST get call. @@ -411,6 +417,7 @@ type ExportOptions struct { Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // GetOptions is the standard query options to the standard REST get call. @@ -448,6 +455,7 @@ const ( DryRunAll = "All" ) +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DeleteOptions may be provided when deleting an API object. @@ -463,6 +471,7 @@ type DeleteOptions struct { // Must be fulfilled before a deletion is carried out. 
If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` @@ -493,6 +502,7 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CreateOptions may be provided when creating an API object. @@ -516,6 +526,7 @@ type CreateOptions struct { FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PatchOptions may be provided when patching an API object. @@ -548,6 +559,7 @@ type PatchOptions struct { FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. @@ -1260,6 +1272,7 @@ const ( ) // TableOptions are used when a Table is requested by the caller. +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type TableOptions struct { TypeMeta `json:",inline"` diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 33a10104c0b..b62e591ee87 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -194,7 +194,7 @@ var map_ListOptions = map[string]string{ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", @@ -234,9 +234,9 @@ var map_ObjectMeta = map[string]string{ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD index 3450d869db7..47046794675 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD @@ -21,6 +21,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", @@ -44,6 +45,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 7ea0986f358..4244b8a6df1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog" ) // NestedFieldCopy returns a deep copy of the value of a nested field. 
@@ -329,6 +330,8 @@ var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} type unstructuredJSONScheme struct{} +const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON" + func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { var err error if obj != nil { @@ -349,7 +352,14 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, return obj, &gvk, nil } -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { +func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case *Unstructured: return json.NewEncoder(w).Encode(t.Object) @@ -373,6 +383,11 @@ func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. +func (unstructuredJSONScheme) Identifier() runtime.Identifier { + return unstructuredJSONSchemeIdentifier +} + func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { type detector struct { Items gojson.RawMessage @@ -400,12 +415,6 @@ func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) erro return s.decodeToUnstructured(data, x) case *UnstructuredList: return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err default: return json.Unmarshal(data, x) } @@ -460,12 +469,30 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList return nil } -type JSONFallbackEncoder struct { - runtime.Encoder +type jsonFallbackEncoder struct { + encoder runtime.Encoder + identifier runtime.Identifier } -func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { - err := c.Encoder.Encode(obj, w) +func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder { + result := map[string]string{ + "name": "fallback", + "base": string(encoder.Identifier()), + } + identifier, err := gojson.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err) + } + return &jsonFallbackEncoder{ + encoder: encoder, + identifier: runtime.Identifier(identifier), + } +} + +func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we only + // fallback to other encoders here. + err := c.encoder.Encode(obj, w) if runtime.IsNotRegisteredError(err) { switch obj.(type) { case *Unstructured, *UnstructuredList: @@ -474,3 +501,8 @@ func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { } return err } + +// Identifier implements runtime.Encoder interface. 
+func (c *jsonFallbackEncoder) Identifier() runtime.Identifier { + return c.identifier +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..2ade69dd9eb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -0,0 +1,523 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + url "net/url" + unsafe "unsafe" + + resource "k8s.io/apimachinery/pkg/api/resource" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + watch "k8s.io/apimachinery/pkg/watch" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), 
b.(*TableOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope) + }); err != nil { + return 
err + } + if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*url.Values)(nil), 
(*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_CreateOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil { + return err + } + } else { + out.GracePeriodSeconds = nil + } + // INFO: in.Preconditions opted out of conversion generation + if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil { + return err + } + } else { + out.OrphanDependents = nil + } + if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil { + return err + } + } else { + out.PropagationPolicy = nil + } + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + return nil +} + +func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil { + return err + } + } else { + out.Export = false + } + if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil { + return err + } + } else { + out.Exact = false + } + return nil +} + +// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ExportOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + return nil +} + +// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function. 
+func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_GetOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil { + return err + } + } else { + out.LabelSelector = "" + } + if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil { + return err + } + } else { + out.FieldSelector = "" + } + if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil { + return err + } + } else { + out.Watch = false + } + if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil { + return err + } + } else { + out.AllowWatchBookmarks = false + } + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil { + return err + } + } else { + out.TimeoutSeconds = nil + } + if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil { + return err + } + } else { + out.Limit = 0 + } + if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil { + return err + } + } else { + out.Continue = "" + } + return nil +} + +// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ListOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil { + return err + } + } else { + out.Force = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function. 
+func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PatchOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil { + return err + } + } else { + out.NoHeaders = false + } + if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil { + return err + } + } else { + out.IncludeObject = "" + } + return nil +} + +// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_TableOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD index 6cba6c2aa46..a6fe14401de 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD @@ -21,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index 108a0764e72..4b4acd72f12 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -19,6 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // GroupName is the group name for this API. @@ -38,12 +39,7 @@ var scheme = runtime.NewScheme() // ParameterCodec knows about query parameters used with the meta v1beta1 API spec. 
var ParameterCodec = runtime.NewParameterCodec(scheme) -func init() { - if err := AddMetaToScheme(scheme); err != nil { - panic(err) - } -} - +// AddMetaToScheme registers base meta types into schemas. func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, @@ -55,7 +51,11 @@ func AddMetaToScheme(scheme *runtime.Scheme) error { return scheme.AddConversionFuncs( Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, ) +} + +func init() { + utilruntime.Must(AddMetaToScheme(scheme)) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + utilruntime.Must(RegisterDefaults(scheme)) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 9be9e57d3af..2f8e1e2b0c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -54,6 +54,11 @@ type Selector interface { // Make a deep copy of the selector. DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. + RequiresExactMatch(label string) (value string, found bool) } // Everything returns a selector that matches all labels. @@ -63,12 +68,13 @@ func Everything() Selector { type nothingSelector struct{} -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } -func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false } // Nothing returns a selector that matches no labels func Nothing() Selector { @@ -358,6 +364,23 @@ func (lsel internalSelector) String() string { return strings.Join(reqs, ",") } +// RequiresExactMatch introspect whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. 
+func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range lsel { + if lsel[ix].key == label { + switch lsel[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(lsel[ix].strValues) == 1 { + return lsel[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + // Token represents constant definition for lexer token type Token int @@ -850,7 +873,7 @@ func SelectorFromSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { r, err := NewRequirement(label, selection.Equals, []string{value}) if err == nil { @@ -862,7 +885,7 @@ func SelectorFromSet(ls Set) Selector { } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. @@ -872,13 +895,13 @@ func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // ParseToRequirements takes a string representing a selector and returns a list of diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD index f26d903a0e5..f63f13cce76 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -50,6 +50,7 @@ go_library( "helper.go", "interfaces.go", "mapper.go", + "negotiate.go", "register.go", "scheme.go", "scheme_builder.go", diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 284e32bc3cb..0bccf9dd95b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -19,13 +19,17 @@ package runtime import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "io" "net/url" "reflect" + "strconv" + "strings" "k8s.io/apimachinery/pkg/conversion/queryparams" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // codec binds an encoder and decoder. @@ -100,10 +104,19 @@ type NoopEncoder struct { var _ Serializer = NoopEncoder{} +const noopEncoderIdentifier Identifier = "noop" + func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we don't + // process the obj at all. return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) } +// Identifier implements runtime.Encoder interface. +func (n NoopEncoder) Identifier() Identifier { + return noopEncoderIdentifier +} + // NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. 
type NoopDecoder struct { Encoder @@ -193,19 +206,51 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u type base64Serializer struct { Encoder Decoder + + identifier Identifier } func NewBase64Serializer(e Encoder, d Decoder) Serializer { - return &base64Serializer{e, d} + return &base64Serializer{ + Encoder: e, + Decoder: d, + identifier: identifier(e), + } +} + +func identifier(e Encoder) Identifier { + result := map[string]string{ + "name": "base64", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err) + } + return Identifier(identifier) } func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + if co, ok := obj.(CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, stream) + } + return s.doEncode(obj, stream) +} + +func (s base64Serializer) doEncode(obj Object, stream io.Writer) error { e := base64.NewEncoder(base64.StdEncoding, stream) err := s.Encoder.Encode(obj, e) e.Close() return err } +// Identifier implements runtime.Encoder interface. +func (s base64Serializer) Identifier() Identifier { + return s.identifier +} + func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) n, err := base64.StdEncoding.Decode(out, data) @@ -238,6 +283,11 @@ var ( DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} ) +const ( + internalGroupVersionerIdentifier = "internal" + disabledGroupVersionerIdentifier = "disabled" +) + type internalGroupVersioner struct{} // KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. @@ -253,6 +303,11 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } +// Identifier implements GroupVersioner interface. +func (internalGroupVersioner) Identifier() string { + return internalGroupVersionerIdentifier +} + type disabledGroupVersioner struct{} // KindForGroupVersionKinds returns false for any input. @@ -260,19 +315,9 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } -// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. -type GroupVersioners []GroupVersioner - -// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. -func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { - for _, gv := range gvs { - target, ok := gv.KindForGroupVersionKinds(kinds) - if !ok { - continue - } - return target, true - } - return schema.GroupVersionKind{}, false +// Identifier implements GroupVersioner interface. +func (disabledGroupVersioner) Identifier() string { + return disabledGroupVersionerIdentifier } // Assert that schema.GroupVersion and GroupVersions implement GroupVersioner @@ -330,3 +375,22 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio } return schema.GroupVersionKind{}, false } + +// Identifier implements GroupVersioner interface. 
+func (v multiGroupVersioner) Identifier() string { + groupKinds := make([]string, 0, len(v.acceptedGroupKinds)) + for _, gk := range v.acceptedGroupKinds { + groupKinds = append(groupKinds, gk.String()) + } + result := map[string]string{ + "name": "multi", + "target": v.target.String(), + "accepted": strings.Join(groupKinds, ","), + "coerce": strconv.FormatBool(v.coerce), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err) + } + return string(identifier) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index 08d2abfe687..0947dce7356 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -61,19 +61,21 @@ var DefaultStringConversions = []interface{}{ Convert_Slice_string_To_int64, } -func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error { + if len(*in) == 0 { *out = "" + return nil } - *out = (*input)[0] + *out = (*in)[0] return nil } -func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error { + if len(*in) == 0 { *out = 0 + return nil } - str := (*input)[0] + str := (*in)[0] i, err := strconv.Atoi(str) if err != nil { return err @@ -83,15 +85,16 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) } // Convert_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. // Any other value (including empty string) resolves to true. -func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error { + if len(*in) == 0 { *out = false return nil } - switch strings.ToLower((*input)[0]) { - case "false", "0": + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): *out = false default: *out = true @@ -99,15 +102,79 @@ func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope return nil } -func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { - if len(*input) == 0 { +// Convert_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. +// Any other value (including empty string) resolves to true. 
+func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error { + if len(*in) == 0 { + boolVar := false + *out = &boolVar + return nil + } + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + boolVar := false + *out = &boolVar + default: + boolVar := true + *out = &boolVar + } + return nil +} + +func string_to_int64(in string) (int64, error) { + return strconv.ParseInt(in, 10, 64) +} + +func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error { + if in == nil { + *out = 0 + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = i + return nil +} + +func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error { + if len(*in) == 0 { *out = 0 + return nil } - str := (*input)[0] - i, err := strconv.ParseInt(str, 10, 64) + i, err := string_to_int64((*in)[0]) if err != nil { return err } *out = i return nil } + +func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error { + if in == nil { + *out = nil + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = &i + return nil +} + +func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = nil + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = &i + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index bded5bf1591..f44693c0c6f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -37,13 +37,36 @@ type GroupVersioner interface { // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) + // Identifier returns string representation of the object. + // Identifiers of two different encoders should be equal only if for every input + // kinds they return the same result. + Identifier() string } +// Identifier represents an identifier. +// Identifier of two different objects should be equal if and only if for every +// input the output they produce is exactly the same. +type Identifier string + // Encoder writes objects to a serialized form type Encoder interface { // Encode writes an object to a stream. Implementations may return errors if the versions are // incompatible, or if no conversion is defined. Encode(obj Object, w io.Writer) error + // Identifier returns an identifier of the encoder. + // Identifiers of two different encoders should be equal if and only if for every input + // object it will be encoded to the same representation by both of them. + // + // Identifier is intended for use with CacheableObject#CacheEncode method.
In order to + // correctly handle CacheableObject, Encode() method should look similar to below, where + // doEncode() is the encoding logic of implemented encoder: + // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { + // if co, ok := obj.(CacheableObject); ok { + // return co.CacheEncode(e.Identifier(), e.doEncode, w) + // } + // return e.doEncode(obj, w) + // } + Identifier() Identifier } // Decoder attempts to load an object from data. @@ -132,6 +155,28 @@ type NegotiatedSerializer interface { DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder } +// ClientNegotiator handles turning an HTTP content type into the appropriate encoder. +// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from +// a NegotiatedSerializer. +type ClientNegotiator interface { + // Encoder returns the appropriate encoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Encoder(contentType string, params map[string]string) (Encoder, error) + // Decoder returns the appropriate decoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Decoder(contentType string, params map[string]string) (Decoder, error) + // StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g. + // application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no + // serializer is found a NegotiateError will be returned. The Serializer and Framer will always + // be returned if a Decoder is returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) +} + // StorageSerializer is an interface used for obtaining encoders, decoders, and serializers // that can read and write data at rest. This would commonly be used by client tools that must // read files, or server side storage interfaces that persist restful objects. @@ -256,6 +301,27 @@ type Object interface { DeepCopyObject() Object } +// CacheableObject allows an object to cache its different serializations +// to avoid performing the same serialization multiple times. +type CacheableObject interface { + // CacheEncode writes an object to a stream. The <encode> function will + // be used in case of cache miss. The <encode> function takes ownership + // of the object. + // If CacheableObject is a wrapper, then deep-copy of the wrapped object + // should be passed to <encode> function. + // CacheEncode assumes that for two different calls with the same <id>, + // <encode> function will also be the same. + CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error + // GetObject returns a deep-copy of an object to be encoded - the caller of + // GetObject() is the owner of returned object. The reason for making a copy + // is to avoid bugs, where caller modifies the object and forgets to copy it, + // thus modifying the object for everyone.
+ // The object returned by GetObject should be the same as the one that is supposed + // to be passed to function in CacheEncode method. + // If CacheableObject is a wrapper, the copy of wrapped object should be returned. + GetObject() Object +} + // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go new file mode 100644 index 00000000000..159b301206a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NegotiateError is returned when a ClientNegotiator is unable to locate +// a serializer for the requested operation. +type NegotiateError struct { + ContentType string + Stream bool +} + +func (e NegotiateError) Error() string { + if e.Stream { + return fmt.Sprintf("no stream serializers registered for %s", e.ContentType) + } + return fmt.Sprintf("no serializers registered for %s", e.ContentType) +} + +type clientNegotiator struct { + serializer NegotiatedSerializer + encode, decode GroupVersioner +} + +func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) { + // TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method + // if client negotiators truly need to use it + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil +} + +func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil +} + +func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true} + } + info = mediaTypes[0] + } + if info.StreamSerializer == nil { + return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true} + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), 
info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil +} + +// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or +// stream decoder for a given content type. Does not perform any conversion, but will +// encode the object to the desired group, version, and kind. Use when creating a client. +func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: serializer, + encode: gv, + } +} + +// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver +// where objects are converted to gv prior to sending and decoded to their internal representation prior +// to retrieval. +// +// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. +func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + decode := schema.GroupVersions{ + { + Group: gv.Group, + Version: APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: APIVersionInternal, + }, + } + return &clientNegotiator{ + encode: gv, + decode: decode, + serializer: serializer, + } +} + +// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used +// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. +func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: &simpleNegotiatedSerializer{info: info}, + encode: gv, + } +} + +type simpleNegotiatedSerializer struct { + info SerializerInfo +} + +func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer { + return &simpleNegotiatedSerializer{info: info} +} + +func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo { + return []SerializerInfo{n.info} +} + +func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder { + return e +} + +func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder { + return d +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/register.go index eeb380c3dc3..1cd2e4c3871 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -29,33 +29,3 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } - -// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind -// interface if no objects are provided, or the ObjectKind interface of the object in the -// highest array position. -func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { - last := obj.Last() - if last == nil { - return schema.EmptyObjectKind - } - return last.GetObjectKind() -} - -// First returns the leftmost object in the VersionedObjects array, which is usually the -// object as serialized on the wire. -func (obj *VersionedObjects) First() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[0] -} - -// Last is the rightmost object in the VersionedObjects array, which is the object after -// all transformations have been applied. 
This is the same object that would be returned -// by Decode in a normal invocation (without VersionedObjects in the into argument). -func (obj *VersionedObjects) Last() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[len(obj.Objects)-1] -} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 4c67ed59801..636103312f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,6 +191,11 @@ func (gv GroupVersion) String() string { return gv.Version } +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersion) Identifier() string { + return gv.String() +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. @@ -246,6 +251,15 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // in fewer places. type GroupVersions []GroupVersion +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersions) Identifier() string { + groupVersions := make([]string, 0, len(gv)) + for i := range gv { + groupVersions = append(groupVersions, gv[i].String()) + } + return fmt.Sprintf("[%s]", strings.Join(groupVersions, ",")) +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD index 8ff68498d0a..12bba92d10c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = [ + "json_limit_test.go", "json_test.go", "meta_test.go", ], @@ -16,7 +17,9 @@ go_test( deps = [ "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", ], ) @@ -36,6 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", "//vendor/github.com/modern-go/reflect2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 69ada8ecf9c..9d17f09e54c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -31,6 +31,7 @@ import ( 
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/util/framer" utilyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog" ) // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer @@ -53,13 +54,28 @@ func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer ru // and are immutable. func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - options: options, + meta: meta, + creater: creater, + typer: typer, + options: options, + identifier: identifier(options), } } +// identifier computes Identifier of Encoder based on the given options. +func identifier(options SerializerOptions) runtime.Identifier { + result := map[string]string{ + "name": "json", + "yaml": strconv.FormatBool(options.Yaml), + "pretty": strconv.FormatBool(options.Pretty), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err) + } + return runtime.Identifier(identifier) +} + // SerializerOptions holds the options which are used to configure a JSON/YAML serializer. // example: // (1) To configure a JSON serializer, set `Yaml` to `false`. @@ -85,6 +101,8 @@ type Serializer struct { options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper + + identifier runtime.Identifier } // Serializer implements Serializer @@ -188,16 +206,6 @@ func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVer // On success or most errors, the method will return the calculated schema kind. // The gvk calculate priority will be originalData > default gvk > into func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - versioned.Objects = []runtime.Object{obj} - return versioned, actual, nil - } - data := originalData if s.options.Yaml { altered, err := yaml.YAMLToJSON(data) @@ -286,6 +294,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { @@ -311,6 +326,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return encoder.Encode(obj) } +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return s.identifier +} + // RecognizesData implements the RecognizingDecoder interface. 
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { if s.options.Yaml { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD index 6008b387dce..06fc1ed5575 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD @@ -1,9 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -35,3 +32,14 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["protobuf_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index 0f33e1d821f..f606b7d728b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -86,6 +86,8 @@ type Serializer struct { var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} +const serializerIdentifier runtime.Identifier = "protobuf" + // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -93,23 +95,6 @@ var _ recognizer.RecognizingDecoder = &Serializer{} // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - // the last item in versioned becomes into, so if versioned was not originally empty we reset the object - // array so the first position is the decoded object and the second position is the outermost object. - // if there were no objects in the versioned list passed to us, only add ourselves. - if into != nil && into != obj { - versioned.Objects = []runtime.Object{obj, into} - } else { - versioned.Objects = []runtime.Object{obj} - } - return versioned, actual, err - } - prefixLen := len(s.prefix) switch { case len(originalData) == 0: @@ -176,6 +161,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. 
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { prefixSize := uint64(len(s.prefix)) var unk runtime.Unknown @@ -245,6 +237,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return serializerIdentifier +} + // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { prefix := make([]byte, 4) @@ -321,6 +318,8 @@ type RawSerializer struct { var _ runtime.Serializer = &RawSerializer{} +const rawSerializerIdentifier runtime.Identifier = "raw-protobuf" + // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -332,20 +331,6 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) } - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - if into != nil && into != obj { - versioned.Objects = []runtime.Object{obj, into} - } else { - versioned.Objects = []runtime.Object{obj} - } - return versioned, actual, err - } - if len(originalData) == 0 { // TODO: treat like decoding {} from JSON with defaulting return nil, nil, fmt.Errorf("empty data") @@ -419,6 +404,13 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, // Encode serializes the provided object to the given writer. Overrides is ignored. func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedReverseMarshaller: // this path performs a single allocation during write but requires the caller to implement @@ -460,6 +452,11 @@ func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. 
+func (s *RawSerializer) Identifier() runtime.Identifier { + return rawSerializerIdentifier +} + var LengthDelimitedFramer = lengthDelimitedFramer{} type lengthDelimitedFramer struct{} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD index 9ba9d16f5a4..9c206b42be4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD @@ -17,6 +17,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], @@ -31,6 +32,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index ee5cb86f7e6..ced184c91e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -17,12 +17,15 @@ limitations under the License. package versioning import ( + "encoding/json" "io" "reflect" + "sync" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. @@ -62,6 +65,8 @@ func NewCodec( encodeVersion: encodeVersion, decodeVersion: decodeVersion, + identifier: identifier(encodeVersion, encoder), + originalSchemeName: originalSchemeName, } return internal @@ -78,19 +83,47 @@ type codec struct { encodeVersion runtime.GroupVersioner decodeVersion runtime.GroupVersioner + identifier runtime.Identifier + // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates originalSchemeName string } +var identifiersMap sync.Map + +type codecIdentifier struct { + EncodeGV string `json:"encodeGV,omitempty"` + Encoder string `json:"encoder,omitempty"` + Name string `json:"name,omitempty"` +} + +// identifier computes Identifier of Encoder based on codec parameters. 
+func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier { + result := codecIdentifier{ + Name: "versioning", + } + + if encodeGV != nil { + result.EncodeGV = encodeGV.Identifier() + } + if encoder != nil { + result.Encoder = string(encoder.Identifier()) + } + if id, ok := identifiersMap.Load(result); ok { + return id.(runtime.Identifier) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for codec: %v", err) + } + identifiersMap.Store(result, runtime.Identifier(identifier)) + return runtime.Identifier(identifier) +} + // Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is // successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an // into that matches the serialized version. func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - versioned, isVersioned := into.(*runtime.VersionedObjects) - if isVersioned { - into = versioned.Last() - } - // If the into object is unstructured and expresses an opinion about its group/version, // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) decodeInto := into @@ -115,22 +148,11 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if into != nil { // perform defaulting if requested if c.defaulter != nil { - // create a copy to ensure defaulting is not applied to the original versioned objects - if isVersioned { - versioned.Objects = []runtime.Object{obj.DeepCopyObject()} - } c.defaulter.Default(obj) - } else { - if isVersioned { - versioned.Objects = []runtime.Object{obj} - } } // Short-circuit conversion if the into object is same object if into == obj { - if isVersioned { - return versioned, gvk, nil - } return into, gvk, nil } @@ -138,19 +160,9 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru return nil, gvk, err } - if isVersioned { - versioned.Objects = append(versioned.Objects, into) - return versioned, gvk, nil - } return into, gvk, nil } - // Convert if needed. - if isVersioned { - // create a copy, because ConvertToVersion does not guarantee non-mutation of objects - versioned.Objects = []runtime.Object{obj.DeepCopyObject()} - } - // perform defaulting if requested if c.defaulter != nil { c.defaulter.Default(obj) @@ -160,18 +172,19 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if err != nil { return nil, gvk, err } - if isVersioned { - if versioned.Last() != out { - versioned.Objects = append(versioned.Objects, out) - } - return versioned, gvk, nil - } return out, gvk, nil } // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. 
func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { switch obj := obj.(type) { case *runtime.Unknown: return c.encoder.Encode(obj, w) @@ -230,3 +243,8 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object return c.encoder.Encode(out, w) } + +// Identifier implements runtime.Encoder interface. +func (c *codec) Identifier() runtime.Identifier { + return c.identifier +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/BUILD deleted file mode 100644 index 83712ab41b9..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["yaml.go"], - importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/yaml", - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go deleted file mode 100644 index 2fdd1d43d52..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package yaml - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// yamlSerializer converts YAML passed to the Decoder methods to JSON. -type yamlSerializer struct { - // the nested serializer - runtime.Serializer -} - -// yamlSerializer implements Serializer -var _ runtime.Serializer = yamlSerializer{} - -// NewDecodingSerializer adds YAML decoding support to a serializer that supports JSON. 
-func NewDecodingSerializer(jsonSerializer runtime.Serializer) runtime.Serializer { - return &yamlSerializer{jsonSerializer} -} - -func (c yamlSerializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - out, err := yaml.ToJSON(data) - if err != nil { - return nil, nil, err - } - data = out - return c.Serializer.Decode(data, gvk, into) -} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 2f0b6c9e55b..31359f35f45 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -124,16 +124,3 @@ type Unknown struct { // Unspecified means ContentTypeJSON. ContentType string `protobuf:"bytes,4,opt,name=contentType"` } - -// VersionedObjects is used by Decoders to give callers a way to access all versions -// of an object during the decoding process. -// -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type VersionedObjects struct { - // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. If conversion has occurred, - // other objects may be present. The right most object is the same as would be returned - // by a normal Decode call. - Objects []Object -} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 8b9182f359d..b0393839e1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -73,36 +73,3 @@ func (in *Unknown) DeepCopyObject() Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]Object, len(*in)) - for i := range *in { - if (*in)[i] != nil { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. -func (in *VersionedObjects) DeepCopy() *VersionedObjects { - if in == nil { - return nil - } - out := new(VersionedObjects) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. 
-func (in *VersionedObjects) DeepCopyObject() Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD index 06a7cfa2000..ff280d456ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD @@ -9,25 +9,29 @@ load( go_test( name = "go_default_test", srcs = [ - "cache_test.go", + "expiring_test.go", "lruexpirecache_test.go", ], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", ], ) go_library( name = "go_default_library", srcs = [ - "cache.go", + "expiring.go", "lruexpirecache.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/cache", importpath = "k8s.io/apimachinery/pkg/util/cache", - deps = ["//vendor/github.com/hashicorp/golang-lru:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//vendor/github.com/hashicorp/golang-lru:go_default_library", + ], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go deleted file mode 100644 index 9a09fe54d6e..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "sync" -) - -const ( - shardsCount int = 32 -) - -type Cache []*cacheShard - -func NewCache(maxSize int) Cache { - if maxSize < shardsCount { - maxSize = shardsCount - } - cache := make(Cache, shardsCount) - for i := 0; i < shardsCount; i++ { - cache[i] = &cacheShard{ - items: make(map[uint64]interface{}), - maxSize: maxSize / shardsCount, - } - } - return cache -} - -func (c Cache) getShard(index uint64) *cacheShard { - return c[index%uint64(shardsCount)] -} - -// Returns true if object already existed, false otherwise. -func (c *Cache) Add(index uint64, obj interface{}) bool { - return c.getShard(index).add(index, obj) -} - -func (c *Cache) Get(index uint64) (obj interface{}, found bool) { - return c.getShard(index).get(index) -} - -type cacheShard struct { - items map[uint64]interface{} - sync.RWMutex - maxSize int -} - -// Returns true if object already existed, false otherwise. 
-func (s *cacheShard) add(index uint64, obj interface{}) bool { - s.Lock() - defer s.Unlock() - _, isOverwrite := s.items[index] - if !isOverwrite && len(s.items) >= s.maxSize { - var randomKey uint64 - for randomKey = range s.items { - break - } - delete(s.items, randomKey) - } - s.items[index] = obj - return isOverwrite -} - -func (s *cacheShard) get(index uint64) (obj interface{}, found bool) { - s.RLock() - defer s.RUnlock() - obj, found = s.items[index] - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go new file mode 100644 index 00000000000..84b4f588497 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/heap" + "sync" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" +) + +// NewExpiring returns an initialized expiring cache. +func NewExpiring() *Expiring { + return NewExpiringWithClock(utilclock.RealClock{}) +} + +// NewExpiringWithClock is like NewExpiring but allows passing in a custom +// clock for testing. +func NewExpiringWithClock(clock utilclock.Clock) *Expiring { + return &Expiring{ + clock: clock, + cache: make(map[interface{}]entry), + } +} + +// Expiring is a map whose entries expire after a per-entry timeout. +type Expiring struct { + clock utilclock.Clock + + // mu protects the below fields + mu sync.RWMutex + // cache is the internal map that backs the cache. + cache map[interface{}]entry + // generation is used as a cheap resource version for cache entries. Cleanups + // are scheduled with a key and generation. When the cleanup runs, it first + // compares its generation with the current generation of the entry. It + // deletes the entry iff the generation matches. This prevents cleanups + // scheduled for earlier versions of an entry from deleting later versions of + // an entry when Set() is called multiple times with the same key. + // + // The integer value of the generation of an entry is meaningless. + generation uint64 + + heap expiringHeap +} + +type entry struct { + val interface{} + expiry time.Time + generation uint64 +} + +// Get looks up an entry in the cache. +func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.cache[key] + if !ok || !c.clock.Now().Before(e.expiry) { + return nil, false + } + return e.val, true +} + +// Set sets a key/value/expiry entry in the map, overwriting any previous entry +// with the same key. The entry expires at the given expiry time, but its TTL +// may be lengthened or shortened by additional calls to Set(). Garbage +// collection of expired entries occurs during calls to Set(), however calls to +// Get() will not return expired entries that have not yet been garbage +// collected. 
+func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) { + now := c.clock.Now() + expiry := now.Add(ttl) + + c.mu.Lock() + defer c.mu.Unlock() + + c.generation++ + + c.cache[key] = entry{ + val: val, + expiry: expiry, + generation: c.generation, + } + + // Run GC inline before pushing the new entry. + c.gc(now) + + heap.Push(&c.heap, &expiringHeapEntry{ + key: key, + expiry: expiry, + generation: c.generation, + }) +} + +// Delete deletes an entry in the map. +func (c *Expiring) Delete(key interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.del(key, 0) +} + +// del deletes the entry for the given key. The generation argument is the +// generation of the entry that should be deleted. If the generation has been +// changed (e.g. if a set has occurred on an existing element but the old +// cleanup still runs), this is a noop. If the generation argument is 0, the +// entry's generation is ignored and the entry is deleted. +// +// del must be called under the write lock. +func (c *Expiring) del(key interface{}, generation uint64) { + e, ok := c.cache[key] + if !ok { + return + } + if generation != 0 && generation != e.generation { + return + } + delete(c.cache, key) +} + +// Len returns the number of items in the cache. +func (c *Expiring) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *Expiring) gc(now time.Time) { + for { + // Return from gc if the heap is empty or the next element is not yet + // expired. + // + // heap[0] is a peek at the next element in the heap, which is not obvious + // from looking at the (*expiringHeap).Pop() implmentation below. + // heap.Pop() swaps the first entry with the last entry of the heap, then + // calls (*expiringHeap).Pop() which returns the last element. + if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { + return + } + cleanup := heap.Pop(&c.heap).(*expiringHeapEntry) + c.del(cleanup.key, cleanup.generation) + } +} + +type expiringHeapEntry struct { + key interface{} + expiry time.Time + generation uint64 +} + +// expiringHeap is a min-heap ordered by expiration time of its entries. The +// expiring cache uses this as a priority queue to efficiently organize entries +// which will be garbage collected once they expire. +type expiringHeap []*expiringHeapEntry + +var _ heap.Interface = &expiringHeap{} + +func (cq expiringHeap) Len() int { + return len(cq) +} + +func (cq expiringHeap) Less(i, j int) bool { + return cq[i].expiry.Before(cq[j].expiry) +} + +func (cq expiringHeap) Swap(i, j int) { + cq[i], cq[j] = cq[j], cq[i] +} + +func (cq *expiringHeap) Push(c interface{}) { + *cq = append(*cq, c.(*expiringHeapEntry)) +} + +func (cq *expiringHeap) Pop() interface{} { + c := (*cq)[cq.Len()-1] + *cq = (*cq)[:cq.Len()-1] + return c +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index a006b925a9e..fa9ffa51b74 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -19,6 +19,7 @@ package diff import ( "bytes" "fmt" + "reflect" "strings" "text/tabwriter" @@ -116,3 +117,41 @@ func ObjectGoPrintSideBySide(a, b interface{}) string { w.Flush() return buf.String() } + +// IgnoreUnset is an option that ignores fields that are unset on the right +// hand side of a comparison. This is useful in testing to assert that an +// object is a derivative. 
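// For reference, a minimal usage sketch of the Expiring cache added above. This is
// illustrative only: the package, key/value literals, and sleep duration are assumptions,
// while NewExpiring, Set, and Get come straight from the vendored file.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewExpiring()

	// Each entry carries its own TTL. Expired entries are garbage collected on
	// later calls to Set, but Get already hides them as soon as they expire.
	c.Set("token", "abc123", 50*time.Millisecond)

	if v, ok := c.Get("token"); ok {
		fmt.Println("cached:", v)
	}

	time.Sleep(100 * time.Millisecond)
	if _, ok := c.Get("token"); !ok {
		fmt.Println("entry expired")
	}
}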
+func IgnoreUnset() cmp.Option { + return cmp.Options{ + // ignore unset fields in v2 + cmp.FilterPath(func(path cmp.Path) bool { + _, v2 := path.Last().Values() + switch v2.Kind() { + case reflect.Slice, reflect.Map: + if v2.IsNil() || v2.Len() == 0 { + return true + } + case reflect.String: + if v2.Len() == 0 { + return true + } + case reflect.Interface, reflect.Ptr: + if v2.IsNil() { + return true + } + } + return false + }, cmp.Ignore()), + // ignore map entries that aren't set in v2 + cmp.FilterPath(func(path cmp.Path) bool { + switch i := path.Last().(type) { + case cmp.MapIndex: + if _, v2 := i.Values(); !v2.IsValid() { + fmt.Println("E") + return true + } + } + return false + }, cmp.Ignore()), + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 12c8a7b6cbe..2df62955538 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -45,7 +45,7 @@ type IntOrString struct { } // Type represents the stored type of IntOrString. -type Type int +type Type int64 const ( Int Type = iota // The IntOrString holds an int. diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 10c8cb837ed..0e2e3017547 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,6 +19,7 @@ package json import ( "bytes" "encoding/json" + "fmt" "io" ) @@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) + return convertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v) + return convertSliceNumbers(*v, 0) default: return json.Unmarshal(data, v) @@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error { // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go index 2965d5a8bc5..d69bf32caa8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[ func extractStackCreator() (string, int, bool) { stack := debug.Stack() matches := stackCreator.FindStringSubmatch(string(stack)) - if matches == nil || len(matches) != 4 { + if len(matches) != 4 { return "", 0, false } line, err := strconv.Atoi(matches[3]) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index daf5d249645..836494d579a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -36,6 +36,18 @@ const ( familyIPv6 AddressFamily = 6 ) +type AddressFamilyPreference []AddressFamily + +var ( + preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} + preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} +) + +const ( + // LoopbackInterfaceName is the default name of the loopback interface + LoopbackInterfaceName = "lo" +) + const ( ipv4RouteFile = "/proc/net/route" ipv6RouteFile = "/proc/net/ipv6_route" @@ -53,7 +65,7 @@ type RouteFile struct { parse func(input io.Reader) ([]Route, error) } -// noRoutesError can be returned by ChooseBindAddress() in case of no routes +// noRoutesError can be returned in case of no routes type noRoutesError struct { message string } @@ -254,7 +266,7 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte return nil, nil } -// memberOF tells if the IP is of the desired family. Used for checking interface addresses. +// memberOf tells if the IP is of the desired family. Used for checking interface addresses. 
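// A quick sketch of the behaviour guarded by the new maxDepth check in util/json.
// Unmarshal post-processes json.Number values into int64 or float64; inputs nested
// more than maxDepth levels deep now return an error instead of overflowing the stack.
// The input literal below is an assumption for illustration.
package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var m map[string]interface{}
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", m["replicas"], m["ratio"]) // int64 float64
}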
func memberOf(ip net.IP, family AddressFamily) bool { if ip.To4() != nil { return family == familyIPv4 @@ -265,8 +277,8 @@ func memberOf(ip net.IP, family AddressFamily) bool { // chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that // has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. -// Searches for IPv4 addresses, and then IPv6 addresses. -func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { +// addressFamilies determines whether it prefers IPv4 or IPv6 +func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { intfs, err := nw.Interfaces() if err != nil { return nil, err @@ -274,7 +286,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { if len(intfs) == 0 { return nil, fmt.Errorf("no interfaces found on host.") } - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + for _, family := range addressFamilies { klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { @@ -321,15 +333,19 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { // IP of the interface with a gateway on it (with priority given to IPv4). For a node // with no internet connection, it returns error. func ChooseHostInterface() (net.IP, error) { + return chooseHostInterface(preferIPv4) +} + +func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { var nw networkInterfacer = networkInterface{} if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { - return chooseIPFromHostInterfaces(nw) + return chooseIPFromHostInterfaces(nw, addressFamilies) } routes, err := getAllDefaultRoutes() if err != nil { return nil, err } - return chooseHostInterfaceFromRoute(routes, nw) + return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) } // networkInterfacer defines an interface for several net library functions. Production @@ -377,10 +393,10 @@ func getAllDefaultRoutes() ([]Route, error) { } // chooseHostInterfaceFromRoute cycles through each default route provided, looking for a -// global IP address from the interface for the route. Will first look all each IPv4 route for -// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. -func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { +// global IP address from the interface for the route. addressFamilies determines whether it +// prefers IPv4 or IPv6 +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { + for _, family := range addressFamilies { klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { @@ -401,12 +417,19 @@ func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, return nil, fmt.Errorf("unable to select an IP from default routes.") } -// If bind-address is usable, return it directly -// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default -// interface. 
-func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { +// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: +// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). +// If bindAddress is unspecified or loopback, it returns the default IP of the same +// address family as bindAddress. +// Otherwise, it just returns bindAddress. +func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { + addressFamilies := preferIPv4 + if bindAddress != nil && memberOf(bindAddress, familyIPv6) { + addressFamilies = preferIPv6 + } + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := ChooseHostInterface() + hostIP, err := chooseHostInterface(addressFamilies) if err != nil { return nil, err } @@ -414,3 +437,21 @@ func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { } return bindAddress, nil } + +// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. +// This is required in case of network setups where default routes are present, but network +// interfaces use only link-local addresses (e.g. as described in RFC5549). +// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. +func ChooseBindAddressForInterface(intfName string) (net.IP, error) { + var nw networkInterfacer = networkInterface{} + for _, family := range preferIPv4 { + ip, err := getIPFromInterface(intfName, family, nw) + if err != nil { + return nil, err + } + if ip != nil { + return ip, nil + } + } + return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index c7348129ae8..1428443f544 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" @@ -56,8 +57,16 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return + } + // Same as stdlib http server code. 
Manually allocate stack trace buffer size // to prevent excessively large logs const size = 64 << 10 diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index ddf998172c0..c55894e5023 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1014,7 +1014,7 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey setOrderIndex++ } // If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list. - // the second check is is a sanity check, and should always be true if the first is true. + // the second check is a sanity check, and should always be true if the first is true. if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 1d372a5259c..0cd5d65775a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -204,7 +204,7 @@ func Forbidden(field *Path, detail string) *Error { // Invalid, but the returned error will not include the too-long // value. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} } // TooMany returns a *Error indicating "too many". This is used to diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 2dd99992dca..8e1907c2a69 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -70,7 +70,11 @@ func IsQualifiedName(value string) []string { return errs } -// IsFullyQualifiedName checks if the name is fully qualified. +// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { var allErrors field.ErrorList if len(name) == 0 { @@ -85,6 +89,26 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { return allErrors } +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. 
+func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + return allErrors +} + const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" @@ -285,6 +309,26 @@ func IsValidIP(value string) []string { return nil } +// IsValidIPv4Address tests that the argument is a valid IPv4 address. +func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. +func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD index 05843e9f17d..149f00a1afd 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -54,6 +54,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go index b05f40f07d8..fb8226ca8de 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go +++ 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/apis/apiserver" - apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" + apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" ) func makeAbs(path, base string) (string, error) { @@ -110,11 +110,11 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, con // previously read input from a non-versioned file configuration to the // current input file. legacyPluginsWithUnversionedConfig := sets.NewString("ImagePolicyWebhook", "PodNodeSelector") - externalConfig := &apiserverv1alpha1.AdmissionConfiguration{} + externalConfig := &apiserverv1.AdmissionConfiguration{} for _, pluginName := range pluginNames { if legacyPluginsWithUnversionedConfig.Has(pluginName) { externalConfig.Plugins = append(externalConfig.Plugins, - apiserverv1alpha1.AdmissionPluginConfiguration{ + apiserverv1.AdmissionPluginConfiguration{ Name: pluginName, Path: configFilePath}) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD index 3b8448659b2..739ed439cea 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD @@ -15,7 +15,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -34,7 +34,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/admission/configuration", importpath = "k8s.io/apiserver/pkg/admission/configuration", deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", @@ -42,7 +42,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", - "//staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/listers/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go index 4ed522cf75d..d9b28ad7856 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -21,13 +21,13 @@ import ( "sort" "sync/atomic" - "k8s.io/api/admissionregistration/v1beta1" + 
"k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission/plugin/webhook" "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" "k8s.io/client-go/informers" - admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" "k8s.io/client-go/tools/cache" ) @@ -41,7 +41,7 @@ type mutatingWebhookConfigurationManager struct { var _ generic.Source = &mutatingWebhookConfigurationManager{} func NewMutatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { - informer := f.Admissionregistration().V1beta1().MutatingWebhookConfigurations() + informer := f.Admissionregistration().V1().MutatingWebhookConfigurations() manager := &mutatingWebhookConfigurationManager{ configuration: &atomic.Value{}, lister: informer.Lister(), @@ -79,7 +79,7 @@ func (m *mutatingWebhookConfigurationManager) updateConfiguration() { m.configuration.Store(mergeMutatingWebhookConfigurations(configurations)) } -func mergeMutatingWebhookConfigurations(configurations []*v1beta1.MutatingWebhookConfiguration) []webhook.WebhookAccessor { +func mergeMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor { // The internal order of webhooks for each configuration is provided by the user // but configurations themselves can be in any order. As we are going to run these // webhooks in serial, they are sorted here to have a deterministic order. @@ -99,7 +99,7 @@ func mergeMutatingWebhookConfigurations(configurations []*v1beta1.MutatingWebhoo return accessors } -type MutatingWebhookConfigurationSorter []*v1beta1.MutatingWebhookConfiguration +type MutatingWebhookConfigurationSorter []*v1.MutatingWebhookConfiguration func (a MutatingWebhookConfigurationSorter) ByName(i, j int) bool { return a[i].Name < a[j].Name diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go index 04f96a44ba8..37062b082e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -21,13 +21,13 @@ import ( "sort" "sync/atomic" - "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission/plugin/webhook" "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" "k8s.io/client-go/informers" - admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" "k8s.io/client-go/tools/cache" ) @@ -41,7 +41,7 @@ type validatingWebhookConfigurationManager struct { var _ generic.Source = &validatingWebhookConfigurationManager{} func NewValidatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { - informer := f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations() + informer := f.Admissionregistration().V1().ValidatingWebhookConfigurations() manager := &validatingWebhookConfigurationManager{ configuration: &atomic.Value{}, lister: informer.Lister(), @@ -80,7 +80,7 @@ func (v 
*validatingWebhookConfigurationManager) updateConfiguration() { v.configuration.Store(mergeValidatingWebhookConfigurations(configurations)) } -func mergeValidatingWebhookConfigurations(configurations []*v1beta1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor { +func mergeValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor { sort.SliceStable(configurations, ValidatingWebhookConfigurationSorter(configurations).ByName) accessors := []webhook.WebhookAccessor{} for _, c := range configurations { @@ -97,7 +97,7 @@ func mergeValidatingWebhookConfigurations(configurations []*v1beta1.ValidatingWe return accessors } -type ValidatingWebhookConfigurationSorter []*v1beta1.ValidatingWebhookConfiguration +type ValidatingWebhookConfigurationSorter []*v1.ValidatingWebhookConfiguration func (a ValidatingWebhookConfigurationSorter) ByName(i, j int) bool { return a[i].Name < a[j].Name diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD index 130de6417b8..88d6f2f304f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD @@ -19,6 +19,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/component-base/featuregate:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go index 822885a5e87..740db6d6155 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go @@ -21,25 +21,30 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" ) type pluginInitializer struct { externalClient kubernetes.Interface externalInformers informers.SharedInformerFactory authorizer authorizer.Authorizer + featureGates featuregate.FeatureGate } // New creates an instance of admission plugins initializer. -// TODO(p0lyn0mial): make the parameters public, this construction seems to be redundant. +// This constructor is public with a long param list so that callers immediately know that new information can be expected +// during compilation when they update a level. 
func New( extClientset kubernetes.Interface, extInformers informers.SharedInformerFactory, authz authorizer.Authorizer, + featureGates featuregate.FeatureGate, ) pluginInitializer { return pluginInitializer{ externalClient: extClientset, externalInformers: extInformers, authorizer: authz, + featureGates: featureGates, } } @@ -57,6 +62,10 @@ func (i pluginInitializer) Initialize(plugin admission.Interface) { if wants, ok := plugin.(WantsAuthorizer); ok { wants.SetAuthorizer(i.authorizer) } + + if wants, ok := plugin.(WantsFeatures); ok { + wants.InspectFeatureGates(i.featureGates) + } } var _ admission.PluginInitializer = pluginInitializer{} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go index 9bddb7155cd..2b3031aa279 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go @@ -21,6 +21,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" ) // WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it @@ -40,3 +41,14 @@ type WantsAuthorizer interface { SetAuthorizer(authorizer.Authorizer) admission.InitializationValidator } + +// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin. +// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one +// and assign it to a simple bool in the admission plugin struct. +// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ +// a.myFeatureIsOn = features.Enabled("my-feature") +// } +type WantsFeatures interface { + InspectFeatureGates(featuregate.FeatureGate) + admission.InitializationValidator +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD index e15ed9a25b2..98e129cb9d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD @@ -25,7 +25,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//vendor/github.com/prometheus/client_model/go:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD index 5e605f4af59..21217f11e05 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD @@ -34,10 +34,12 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 1fbac451742..5db82404583 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -170,8 +170,15 @@ func (l *Lifecycle) Admit(ctx context.Context, a admission.Attributes, o admissi return nil } - // TODO: This should probably not be a 403 - return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace())) + err := admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace())) + if apierr, ok := err.(*errors.StatusError); ok { + apierr.ErrStatus.Details.Causes = append(apierr.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: v1.NamespaceTerminatingCause, + Message: fmt.Sprintf("namespace %s is being terminated", a.GetNamespace()), + Field: "metadata.namespace", + }) + } + return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/BUILD index 6ac42aec05a..835025eb005 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/BUILD @@ -7,7 +7,7 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook", visibility = ["//visibility:public"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", @@ -49,7 +49,7 @@ go_test( srcs = ["accessors_test.go"], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go index 4d5243ead6d..0cbc21c8267 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go @@ -19,7 +19,7 @@ package webhook import ( "sync" - "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/api/admissionregistration/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" webhookutil "k8s.io/apiserver/pkg/util/webhook" @@ -46,37 +46,37 @@ type WebhookAccessor interface { // needed, use GetUID. GetName() string // GetClientConfig gets the webhook ClientConfig field. - GetClientConfig() v1beta1.WebhookClientConfig + GetClientConfig() v1.WebhookClientConfig // GetRules gets the webhook Rules field. - GetRules() []v1beta1.RuleWithOperations + GetRules() []v1.RuleWithOperations // GetFailurePolicy gets the webhook FailurePolicy field. - GetFailurePolicy() *v1beta1.FailurePolicyType + GetFailurePolicy() *v1.FailurePolicyType // GetMatchPolicy gets the webhook MatchPolicy field. - GetMatchPolicy() *v1beta1.MatchPolicyType + GetMatchPolicy() *v1.MatchPolicyType // GetNamespaceSelector gets the webhook NamespaceSelector field. GetNamespaceSelector() *metav1.LabelSelector // GetObjectSelector gets the webhook ObjectSelector field. GetObjectSelector() *metav1.LabelSelector // GetSideEffects gets the webhook SideEffects field. - GetSideEffects() *v1beta1.SideEffectClass + GetSideEffects() *v1.SideEffectClass // GetTimeoutSeconds gets the webhook TimeoutSeconds field. GetTimeoutSeconds() *int32 // GetAdmissionReviewVersions gets the webhook AdmissionReviewVersions field. GetAdmissionReviewVersions() []string // GetMutatingWebhook if the accessor contains a MutatingWebhook, returns it and true, else returns false. - GetMutatingWebhook() (*v1beta1.MutatingWebhook, bool) + GetMutatingWebhook() (*v1.MutatingWebhook, bool) // GetValidatingWebhook if the accessor contains a ValidatingWebhook, returns it and true, else returns false. - GetValidatingWebhook() (*v1beta1.ValidatingWebhook, bool) + GetValidatingWebhook() (*v1.ValidatingWebhook, bool) } // NewMutatingWebhookAccessor creates an accessor for a MutatingWebhook. 
-func NewMutatingWebhookAccessor(uid, configurationName string, h *v1beta1.MutatingWebhook) WebhookAccessor { +func NewMutatingWebhookAccessor(uid, configurationName string, h *v1.MutatingWebhook) WebhookAccessor { return &mutatingWebhookAccessor{uid: uid, configurationName: configurationName, MutatingWebhook: h} } type mutatingWebhookAccessor struct { - *v1beta1.MutatingWebhook + *v1.MutatingWebhook uid string configurationName string @@ -126,19 +126,19 @@ func (m *mutatingWebhookAccessor) GetName() string { return m.Name } -func (m *mutatingWebhookAccessor) GetClientConfig() v1beta1.WebhookClientConfig { +func (m *mutatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { return m.ClientConfig } -func (m *mutatingWebhookAccessor) GetRules() []v1beta1.RuleWithOperations { +func (m *mutatingWebhookAccessor) GetRules() []v1.RuleWithOperations { return m.Rules } -func (m *mutatingWebhookAccessor) GetFailurePolicy() *v1beta1.FailurePolicyType { +func (m *mutatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { return m.FailurePolicy } -func (m *mutatingWebhookAccessor) GetMatchPolicy() *v1beta1.MatchPolicyType { +func (m *mutatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { return m.MatchPolicy } @@ -150,7 +150,7 @@ func (m *mutatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { return m.ObjectSelector } -func (m *mutatingWebhookAccessor) GetSideEffects() *v1beta1.SideEffectClass { +func (m *mutatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { return m.SideEffects } @@ -162,21 +162,21 @@ func (m *mutatingWebhookAccessor) GetAdmissionReviewVersions() []string { return m.AdmissionReviewVersions } -func (m *mutatingWebhookAccessor) GetMutatingWebhook() (*v1beta1.MutatingWebhook, bool) { +func (m *mutatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { return m.MutatingWebhook, true } -func (m *mutatingWebhookAccessor) GetValidatingWebhook() (*v1beta1.ValidatingWebhook, bool) { +func (m *mutatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { return nil, false } // NewValidatingWebhookAccessor creates an accessor for a ValidatingWebhook. 
-func NewValidatingWebhookAccessor(uid, configurationName string, h *v1beta1.ValidatingWebhook) WebhookAccessor { +func NewValidatingWebhookAccessor(uid, configurationName string, h *v1.ValidatingWebhook) WebhookAccessor { return &validatingWebhookAccessor{uid: uid, configurationName: configurationName, ValidatingWebhook: h} } type validatingWebhookAccessor struct { - *v1beta1.ValidatingWebhook + *v1.ValidatingWebhook uid string configurationName string @@ -226,19 +226,19 @@ func (v *validatingWebhookAccessor) GetName() string { return v.Name } -func (v *validatingWebhookAccessor) GetClientConfig() v1beta1.WebhookClientConfig { +func (v *validatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { return v.ClientConfig } -func (v *validatingWebhookAccessor) GetRules() []v1beta1.RuleWithOperations { +func (v *validatingWebhookAccessor) GetRules() []v1.RuleWithOperations { return v.Rules } -func (v *validatingWebhookAccessor) GetFailurePolicy() *v1beta1.FailurePolicyType { +func (v *validatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { return v.FailurePolicy } -func (v *validatingWebhookAccessor) GetMatchPolicy() *v1beta1.MatchPolicyType { +func (v *validatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { return v.MatchPolicy } @@ -250,7 +250,7 @@ func (v *validatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { return v.ObjectSelector } -func (v *validatingWebhookAccessor) GetSideEffects() *v1beta1.SideEffectClass { +func (v *validatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { return v.SideEffects } @@ -262,11 +262,11 @@ func (v *validatingWebhookAccessor) GetAdmissionReviewVersions() []string { return v.AdmissionReviewVersions } -func (v *validatingWebhookAccessor) GetMutatingWebhook() (*v1beta1.MutatingWebhook, bool) { +func (v *validatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { return nil, false } -func (v *validatingWebhookAccessor) GetValidatingWebhook() (*v1beta1.ValidatingWebhook, bool) { +func (v *validatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { return v.ValidatingWebhook, true } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD index 08c4e6037e1..8848f7d6a7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,6 +12,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", ], ) @@ -32,3 +33,9 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["kubeconfig_test.go"], + embed = [":go_default_library"], +) diff --git 
a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD index 7a536295a6c..8f8bfcc1d5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD @@ -30,6 +30,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/install:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1:all-srcs", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go index c958d15baa5..2f49b897615 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go @@ -43,9 +43,11 @@ func Resource(resource string) schema.GroupResource { } func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, &WebhookAdmission{}, ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/BUILD new file mode 100644 index 00000000000..01c2ff02cb6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1", + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go similarity index 66% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go index 2ff5732d6db..92cfed10744 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() -} +// Package v1 is the v1 version of the API. +package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go new file mode 100644 index 00000000000..4a9c0a689b0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go similarity index 57% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go index d60c6fddb06..632427d7df9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go @@ -14,19 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package nodeinfo - -// Snapshot is a snapshot of cache NodeInfo. The scheduler takes a -// snapshot at the beginning of each scheduling cycle and uses it for its -// operations in that cycle. -type Snapshot struct { - NodeInfoMap map[string]*NodeInfo - Generation int64 -} +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` -// NewSnapshot initializes a Snapshot struct and returns it. -func NewSnapshot() *Snapshot { - return &Snapshot{ - NodeInfoMap: make(map[string]*NodeInfo), - } + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..65eb414fcc3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go @@ -0,0 +1,67 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission is an autogenerated conversion function. +func Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..99fc6a6fa7e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. 
+func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go new file mode 100644 index 00000000000..cce2e603a69 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go index 3f5d22f954e..78f5312a475 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -27,6 +27,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1" "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" ) @@ -37,6 +38,7 @@ var ( func init() { utilruntime.Must(webhookadmission.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/BUILD index 5c0ece31253..edb175111eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/admission/v1:go_default_library", "//staging/src/k8s.io/api/admission/v1beta1:go_default_library", - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -52,7 +52,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go index a4a6aff51a5..6fc0eff297d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -23,7 +23,7 @@ import ( admissionv1 "k8s.io/api/admission/v1" admissionv1beta1 "k8s.io/api/admission/v1beta1" - "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/api/admissionregistration/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" @@ -155,7 +155,7 @@ func (a *Webhook) ShouldCallHook(h webhook.WebhookAccessor, attr admission.Attri break } } - if invocation == nil && h.GetMatchPolicy() != nil && *h.GetMatchPolicy() == v1beta1.Equivalent { + if invocation == nil && h.GetMatchPolicy() != nil && *h.GetMatchPolicy() == v1.Equivalent { attrWithOverride := 
&attrWithResourceOverride{Attributes: attr} equivalents := o.GetEquivalentResourceMapper().EquivalentResourcesFor(attr.GetResource(), attr.GetSubresource()) // honor earlier rules first diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD index 791f55207d2..123f0d9f5af 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -13,7 +13,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/admission/v1:go_default_library", - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 7d32e65d991..3d6f022e2f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -28,7 +28,7 @@ import ( "k8s.io/klog" admissionv1 "k8s.io/api/admission/v1" - "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -103,7 +103,7 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib } hook, ok := invocation.Webhook.GetMutatingWebhook() if !ok { - return fmt.Errorf("mutating webhook dispatch requires v1beta1.MutatingWebhook, but got %T", hook) + return fmt.Errorf("mutating webhook dispatch requires v1.MutatingWebhook, but got %T", hook) } // This means that during reinvocation, a webhook will not be // called for the first time. 
For example, if the webhook is @@ -133,7 +133,7 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib round = 1 } changed, err := a.callAttrMutatingHook(ctx, hook, invocation, versionedAttr, o, round, i) - ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == admissionregistrationv1.Ignore rejected := false if err != nil { switch err := err.(type) { @@ -156,7 +156,7 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() reinvokeCtx.SetShouldReinvoke() } - if hook.ReinvocationPolicy != nil && *hook.ReinvocationPolicy == v1beta1.IfNeededReinvocationPolicy { + if hook.ReinvocationPolicy != nil && *hook.ReinvocationPolicy == admissionregistrationv1.IfNeededReinvocationPolicy { webhookReinvokeCtx.AddReinvocableWebhookToPreviouslyInvoked(invocation.Webhook.GetUID()) } if err == nil { @@ -196,7 +196,7 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib // note that callAttrMutatingHook updates attr -func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *v1beta1.MutatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes, o admission.ObjectInterfaces, round, idx int) (bool, error) { +func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admissionregistrationv1.MutatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes, o admission.ObjectInterfaces, round, idx int) (bool, error) { configurationName := invocation.Webhook.GetConfigurationName() annotator := newWebhookAnnotator(attr, round, idx, h.Name, configurationName) changed := false @@ -205,7 +205,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *v1beta if h.SideEffects == nil { return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} } - if !(*h.SideEffects == v1beta1.SideEffectClassNone || *h.SideEffects == v1beta1.SideEffectClassNoneOnDryRun) { + if !(*h.SideEffects == admissionregistrationv1.SideEffectClassNone || *h.SideEffects == admissionregistrationv1.SideEffectClassNoneOnDryRun) { return false, webhookerrors.NewDryRunUnsupportedErr(h.Name) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD index 24198a9ee3f..b0a4b44f7ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD @@ -27,7 +27,7 @@ go_test( srcs = ["matcher_test.go"], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/BUILD index 06460672963..ef76686dd89 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/BUILD @@ -39,7 +39,7 @@ go_test( srcs = ["matcher_test.go"], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD index 5f17aa95337..ab40b94dd36 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD @@ -42,7 +42,7 @@ go_test( deps = [ "//staging/src/k8s.io/api/admission/v1:go_default_library", "//staging/src/k8s.io/api/admission/v1beta1:go_default_library", - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD index 9b22e407f13..cb9c1a252c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD @@ -7,7 +7,7 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", visibility = ["//visibility:public"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", @@ -19,7 +19,7 @@ go_test( srcs = ["rules_test.go"], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go index 050885323d3..924e79bcc9f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go @@ -19,7 +19,7 @@ package rules import ( "strings" - "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" @@ -27,7 +27,7 @@ import ( // Matcher determines if the Attr matches the Rule. type Matcher struct { - Rule v1beta1.RuleWithOperations + Rule v1.RuleWithOperations Attr admission.Attributes } @@ -56,15 +56,15 @@ func exactOrWildcard(items []string, requested string) bool { var namespaceResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} func (r *Matcher) scope() bool { - if r.Rule.Scope == nil || *r.Rule.Scope == v1beta1.AllScopes { + if r.Rule.Scope == nil || *r.Rule.Scope == v1.AllScopes { return true } // attr.GetNamespace() is set to the name of the namespace for requests of the namespace object itself. switch *r.Rule.Scope { - case v1beta1.NamespacedScope: + case v1.NamespacedScope: // first make sure that we are not requesting a namespace object (namespace objects are cluster-scoped) return r.Attr.GetResource() != namespaceResource && r.Attr.GetNamespace() != metav1.NamespaceNone - case v1beta1.ClusterScope: + case v1.ClusterScope: // also return true if the request is for a namespace object (namespace objects are cluster-scoped) return r.Attr.GetResource() == namespaceResource || r.Attr.GetNamespace() == metav1.NamespaceNone default: @@ -83,12 +83,12 @@ func (r *Matcher) version() bool { func (r *Matcher) operation() bool { attrOp := r.Attr.GetOperation() for _, op := range r.Rule.Operations { - if op == v1beta1.OperationAll { + if op == v1.OperationAll { return true } // The constants are the same such that this is a valid cast (and this // is tested). - if op == v1beta1.OperationType(attrOp) { + if op == v1.OperationType(attrOp) { return true } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index 6f2fd166a7e..8279e4c7098 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -11,7 +11,7 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", visibility = ["//visibility:public"], deps = [ - "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/api/admissionregistration/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index 20a115acd77..781c4ea3ec2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/api/admissionregistration/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -95,13 +95,13 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr defer wg.Done() hook, ok := invocation.Webhook.GetValidatingWebhook() if !ok { - 
utilruntime.HandleError(fmt.Errorf("validating webhook dispatch requires v1beta1.ValidatingWebhook, but got %T", hook)) + utilruntime.HandleError(fmt.Errorf("validating webhook dispatch requires v1.ValidatingWebhook, but got %T", hook)) return } versionedAttr := versionedAttrs[invocation.Kind] t := time.Now() err := d.callHook(ctx, hook, invocation, versionedAttr) - ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1.Ignore rejected := false if err != nil { switch err := err.(type) { @@ -161,12 +161,12 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr return errs[0] } -func (d *validatingDispatcher) callHook(ctx context.Context, h *v1beta1.ValidatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes) error { +func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes) error { if attr.Attributes.IsDryRun() { if h.SideEffects == nil { return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} } - if !(*h.SideEffects == v1beta1.SideEffectClassNone || *h.SideEffects == v1beta1.SideEffectClassNoneOnDryRun) { + if !(*h.SideEffects == v1.SideEffectClassNone || *h.SideEffects == v1.SideEffectClassNoneOnDryRun) { return webhookerrors.NewDryRunUnsupportedErr(h.Name) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD index 2ddf6d459ed..eac31bbe4f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD @@ -34,6 +34,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/install:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1:all-srcs", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD index b49139011d9..591d2dbcd74 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD @@ -14,6 +14,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go index 4b58a9710be..f53c7c09190 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go @@ -20,12 +20,19 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/v1" "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" 
) // Install registers the API group and adds types to a scheme func Install(scheme *runtime.Scheme) { utilruntime.Must(apiserver.AddToScheme(scheme)) + + // v1alpha is in the k8s.io-suffixed API group utilruntime.Must(v1alpha1.AddToScheme(scheme)) utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)) + + // v1 is in the config.k8s.io-suffixed API group + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion)) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go index 15519d1c47b..7466f45460a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -21,21 +21,15 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -const GroupName = "apiserver.k8s.io" +const LegacyGroupName = "apiserver.k8s.io" +const GroupName = "apiserver.config.k8s.io" + +// LegacySchemeGroupVersion is group version used to register these objects +var LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: runtime.APIVersionInternal} // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme @@ -43,6 +37,10 @@ var ( // Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, &EgressSelectorConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/BUILD similarity index 67% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/BUILD rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/BUILD index 85d53c672ae..b4da01bc762 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/BUILD @@ -11,16 +11,18 @@ go_library( "doc.go", "register.go", "types.go", + "zz_generated.conversion.go", "zz_generated.deepcopy.go", + "zz_generated.defaults.go", ], - importpath = "k8s.io/kubernetes/pkg/scheduler/api/v1", + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1", + importpath = "k8s.io/apiserver/pkg/apis/apiserver/v1", deps = [ - "//pkg/scheduler/api:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go new file mode 100644 index 00000000000..91458aee4e8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/register.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go similarity index 73% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/register.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go index 504de9c7672..8d3bf987f9c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,31 +17,22 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "scheduler" group -var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} +const GroupName = "apiserver.config.k8s.io" -func init() { - if err := addKnownTypes(schedulerapi.Scheme); err != nil { - // Programmer error. - panic(err) - } -} +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} var ( // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - - // SchemeBuilder is a v1 api scheme builder. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder - // AddToScheme is used to add stored functions to scheme. - AddToScheme = localSchemeBuilder.AddToScheme + AddToScheme = localSchemeBuilder.AddToScheme ) func init() { @@ -51,9 +42,11 @@ func init() { localSchemeBuilder.Register(addKnownTypes) } +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Policy{}, + &AdmissionConfiguration{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go new file mode 100644 index 00000000000..e139dceb9de --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. +type AdmissionConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration `json:"plugins"` +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. 
+ Name string `json:"name"` + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string `json:"path"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown `json:"configuration"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..5116139ae98 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -0,0 +1,103 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function. +func Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in, out, s) +} + +func autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function. 
+func Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..a0e039de0d7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -0,0 +1,78 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. +func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go new file mode 100644 index 00000000000..cce2e603a69 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD index fab1df119b2..706f160e5df 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD @@ -17,6 +17,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit", importpath = "k8s.io/apiserver/pkg/apis/audit", deps = [ + "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/types.go index 271274d44f5..b497be6df27 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -17,6 +17,7 @@ limitations under the License. package audit import ( + authnv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -92,10 +93,10 @@ type Event struct { // For non-resource requests, this is the lower-cased HTTP method. Verb string // Authenticated user information. - User UserInfo + User authnv1.UserInfo // Impersonated user information. 
// +optional - ImpersonatedUser *UserInfo + ImpersonatedUser *authnv1.UserInfo // Source IPs, from where the request originated and intermediate proxies. // +optional SourceIPs []string @@ -283,21 +284,3 @@ type ObjectReference struct { // +optional Subresource string } - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - Username string - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - UID string - // The names of groups this user is a part of. - Groups []string - // Any additional information provided by the authenticator. - Extra map[string]ExtraValue -} - -// ExtraValue masks the value so protobuf can generate -type ExtraValue []string diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go index 0e99ab45300..4500bfe3142 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go @@ -117,11 +117,8 @@ func autoConvert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversi out.Stage = audit.Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } - out.ImpersonatedUser = (*audit.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.User = in.User + out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef)) @@ -145,10 +142,7 @@ func autoConvert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversi out.Stage = Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } + out.User = in.User out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go index ac56459ae83..74a18420536 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -23,8 +23,8 @@ package v1alpha1 import ( unsafe "unsafe" - authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -38,16 +38,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_audit_Event_To_v1alpha1_Event(a.(*audit.Event), b.(*Event), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) }); err != nil { @@ -68,16 +58,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_audit_ObjectReference_To_v1alpha1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope) }); err != nil { @@ -139,11 +119,8 @@ func autoConvert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s co out.Stage = audit.Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } - out.ImpersonatedUser = (*audit.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent if in.ObjectRef != nil { @@ -155,7 +132,7 @@ func autoConvert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s co } else { out.ObjectRef = nil } - out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) out.RequestReceivedTimestamp = in.RequestReceivedTimestamp @@ -170,11 +147,8 @@ func autoConvert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s co out.Stage = Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } - out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent if in.ObjectRef != nil { @@ -186,7 +160,7 @@ func autoConvert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s co } else { out.ObjectRef = nil } - out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) out.RequestReceivedTimestamp = in.RequestReceivedTimestamp diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go index ca16088cfea..0aac32119e2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -23,8 +23,8 @@ package v1beta1 import ( unsafe "unsafe" - authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -38,16 +38,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_audit_Event_To_v1beta1_Event(a.(*audit.Event), b.(*Event), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) }); err != nil { @@ -129,15 +119,12 @@ func autoConvert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s con out.Stage = audit.Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } - out.ImpersonatedUser = (*audit.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef)) - out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) out.RequestReceivedTimestamp = in.RequestReceivedTimestamp @@ -152,15 +139,12 @@ func autoConvert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s con out.Stage = Stage(in.Stage) out.RequestURI = in.RequestURI out.Verb = in.Verb - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.User, &out.User, 0); err != nil { - return err - } - out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.User = in.User + out.ImpersonatedUser = (*v1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) out.UserAgent = in.UserAgent out.ObjectRef = (*ObjectReference)(unsafe.Pointer(in.ObjectRef)) - out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) out.RequestReceivedTimestamp = in.RequestReceivedTimestamp diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go index 5049d3db3c5..a210f631b2d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -21,7 +21,8 @@ limitations under the License. package audit import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -32,7 +33,7 @@ func (in *Event) DeepCopyInto(out *Event) { in.User.DeepCopyInto(&out.User) if in.ImpersonatedUser != nil { in, out := &in.ImpersonatedUser, &out.ImpersonatedUser - *out = new(UserInfo) + *out = new(v1.UserInfo) (*in).DeepCopyInto(*out) } if in.SourceIPs != nil { @@ -47,7 +48,7 @@ func (in *Event) DeepCopyInto(out *Event) { } if in.ResponseStatus != nil { in, out := &in.ResponseStatus, &out.ResponseStatus - *out = new(v1.Status) + *out = new(metav1.Status) (*in).DeepCopyInto(*out) } if in.RequestObject != nil { @@ -123,26 +124,6 @@ func (in *EventList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. 
-func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GroupResources) DeepCopyInto(out *GroupResources) { *out = *in @@ -308,39 +289,3 @@ func (in *PolicyRule) DeepCopy() *PolicyRule { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserInfo) DeepCopyInto(out *UserInfo) { - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. -func (in *UserInfo) DeepCopy() *UserInfo { - if in == nil { - return nil - } - out := new(UserInfo) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD index 92726d869cf..869175a4620 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD @@ -19,6 +19,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/audit", importpath = "k8s.io/apiserver/pkg/audit", deps = [ + "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -36,7 +37,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go index 7099e9622cd..f53dfadf15c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -23,9 +23,10 @@ import ( "reflect" "time" - "github.com/pborman/uuid" + "github.com/google/uuid" "k8s.io/klog" + authnv1 "k8s.io/api/authentication/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -57,7 +58,7 @@ func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs a if ids != "" { ev.AuditID = types.UID(ids) } else { - ev.AuditID = types.UID(uuid.NewRandom().String()) + ev.AuditID = types.UID(uuid.New().String()) } ips := utilnet.SourceIPs(req) @@ -68,9 +69,9 @@ func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs a if user := attribs.GetUser(); user != nil { ev.User.Username = user.GetName() - ev.User.Extra = 
map[string]auditinternal.ExtraValue{} + ev.User.Extra = map[string]authnv1.ExtraValue{} for k, v := range user.GetExtra() { - ev.User.Extra[k] = auditinternal.ExtraValue(v) + ev.User.Extra[k] = authnv1.ExtraValue(v) } ev.User.Groups = user.GetGroups() ev.User.UID = user.GetUID() @@ -95,14 +96,14 @@ func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) { if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { return } - ae.ImpersonatedUser = &auditinternal.UserInfo{ + ae.ImpersonatedUser = &authnv1.UserInfo{ Username: user.GetName(), } ae.ImpersonatedUser.Groups = user.GetGroups() ae.ImpersonatedUser.UID = user.GetUID() - ae.ImpersonatedUser.Extra = map[string]auditinternal.ExtraValue{} + ae.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{} for k, v := range user.GetExtra() { - ae.ImpersonatedUser.Extra[k] = auditinternal.ExtraValue(v) + ae.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD index 1da12201175..891ce7e279e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD @@ -27,8 +27,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/util/cert:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index 67958c3639b..b9c7e2e6eee 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -18,7 +18,6 @@ package authenticatorfactory import ( "errors" - "fmt" "time" "github.com/go-openapi/spec" @@ -33,8 +32,7 @@ import ( "k8s.io/apiserver/pkg/authentication/request/x509" "k8s.io/apiserver/pkg/authentication/token/cache" webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" - authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - "k8s.io/client-go/util/cert" + authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1" ) // DelegatingAuthenticatorConfig is the minimal configuration needed to create an authenticator @@ -48,8 +46,10 @@ type DelegatingAuthenticatorConfig struct { // CacheTTL is the length of time that a token authentication answer will be cached. CacheTTL time.Duration - // ClientCAFile is the CA bundle file used to authenticate client certificates - ClientCAFile string + // CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users. + // Generally this is the CA bundle file used to authenticate client certificates + // If this is nil, then mTLS will not be used. 
+ ClientCertificateCAContentProvider CAContentProvider APIAudiences authenticator.Audiences @@ -63,28 +63,19 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur // front-proxy first, then remote // Add the front proxy authenticator if requested if c.RequestHeaderConfig != nil { - requestHeaderAuthenticator, err := headerrequest.NewSecure( - c.RequestHeaderConfig.ClientCA, + requestHeaderAuthenticator := headerrequest.NewDynamicVerifyOptionsSecure( + c.RequestHeaderConfig.CAContentProvider.VerifyOptions, c.RequestHeaderConfig.AllowedClientNames, c.RequestHeaderConfig.UsernameHeaders, c.RequestHeaderConfig.GroupHeaders, c.RequestHeaderConfig.ExtraHeaderPrefixes, ) - if err != nil { - return nil, nil, err - } authenticators = append(authenticators, requestHeaderAuthenticator) } // x509 client cert auth - if len(c.ClientCAFile) > 0 { - clientCAs, err := cert.NewPool(c.ClientCAFile) - if err != nil { - return nil, nil, fmt.Errorf("unable to load client CA file %s: %v", c.ClientCAFile, err) - } - verifyOpts := x509.DefaultVerifyOptions() - verifyOpts.Roots = clientCAs - authenticators = append(authenticators, x509.New(verifyOpts, x509.CommonNameUserConversion)) + if c.ClientCertificateCAContentProvider != nil { + authenticators = append(authenticators, x509.NewDynamic(c.ClientCertificateCAContentProvider.VerifyOptions, x509.CommonNameUserConversion)) } if c.TokenAccessReviewClient != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go index 3eeb238f056..b5aa1c524a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go @@ -16,16 +16,33 @@ limitations under the License. package authenticatorfactory +import ( + "crypto/x509" + + "k8s.io/apiserver/pkg/authentication/request/headerrequest" +) + type RequestHeaderConfig struct { // UsernameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. - UsernameHeaders []string + UsernameHeaders headerrequest.StringSliceProvider // GroupHeaders are the headers to check (case-insensitively) for a group names. All values will be used. - GroupHeaders []string + GroupHeaders headerrequest.StringSliceProvider // ExtraHeaderPrefixes are the head prefixes to check (case-insentively) for filling in // the user.Info.Extra. All values of all matching headers will be added. - ExtraHeaderPrefixes []string - // ClientCA points to CA bundle file which is used verify the identity of the front proxy - ClientCA string + ExtraHeaderPrefixes headerrequest.StringSliceProvider + // CAContentProvider the options for verifying incoming connections using mTLS. Generally this points to CA bundle file which is used verify the identity of the front proxy. + // It may produce different options at will. + CAContentProvider CAContentProvider // AllowedClientNames is a list of common names that may be presented by the authenticating front proxy. Empty means: accept any. 
- AllowedClientNames []string + AllowedClientNames headerrequest.StringSliceProvider +} + +// CAContentProvider provides ca bundle byte content +type CAContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCABundleContent provides ca bundle byte content + CurrentCABundleContent() []byte + // VerifyOptions provides VerifyOptions for authenticators + VerifyOptions() (x509.VerifyOptions, bool) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD index cc9e53ea103..9aa48d1caa4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD @@ -19,7 +19,6 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest", importpath = "k8s.io/apiserver/pkg/authentication/request/headerrequest", deps = [ - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/request/x509:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go index 70af861d8b5..abf509a97d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -24,26 +24,47 @@ import ( "net/url" "strings" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/authenticator" x509request "k8s.io/apiserver/pkg/authentication/request/x509" "k8s.io/apiserver/pkg/authentication/user" utilcert "k8s.io/client-go/util/cert" ) +// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places. +type StringSliceProvider interface { + // Value returns the current string slice. Callers should never mutate the returned value. + Value() []string +} + +// StringSliceProviderFunc is a function that matches the StringSliceProvider interface +type StringSliceProviderFunc func() []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (d StringSliceProviderFunc) Value() []string { + return d() +} + +// StaticStringSlice a StringSliceProvider that returns a fixed value +type StaticStringSlice []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (s StaticStringSlice) Value() []string { + return s +} + type requestHeaderAuthRequestHandler struct { // nameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. - nameHeaders []string + nameHeaders StringSliceProvider // groupHeaders are the headers to check (case-insensitively) for group membership. All values of all headers will be added. - groupHeaders []string + groupHeaders StringSliceProvider // extraHeaderPrefixes are the head prefixes to check (case-insensitively) for filling in // the user.Info.Extra. 
All values of all matching headers will be added. - extraHeaderPrefixes []string + extraHeaderPrefixes StringSliceProvider } -func New(nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []string) (authenticator.Request, error) { +func New(nameHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator.Request, error) { trimmedNameHeaders, err := trimHeaders(nameHeaders...) if err != nil { return nil, err @@ -57,11 +78,19 @@ func New(nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []stri return nil, err } + return NewDynamic( + StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedGroupHeaders), + StaticStringSlice(trimmedExtraHeaderPrefixes), + ), nil +} + +func NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { return &requestHeaderAuthRequestHandler{ - nameHeaders: trimmedNameHeaders, - groupHeaders: trimmedGroupHeaders, - extraHeaderPrefixes: trimmedExtraHeaderPrefixes, - }, nil + nameHeaders: nameHeaders, + groupHeaders: groupHeaders, + extraHeaderPrefixes: extraHeaderPrefixes, + } } func trimHeaders(headerNames ...string) ([]string, error) { @@ -78,11 +107,6 @@ func trimHeaders(headerNames ...string) ([]string, error) { } func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []string) (authenticator.Request, error) { - headerAuthenticator, err := New(nameHeaders, groupHeaders, extraHeaderPrefixes) - if err != nil { - return nil, err - } - if len(clientCA) == 0 { return nil, fmt.Errorf("missing clientCA file") } @@ -102,26 +126,51 @@ func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, opts.Roots.AddCert(cert) } - return x509request.NewVerifier(opts, headerAuthenticator, sets.NewString(proxyClientNames...)), nil + trimmedNameHeaders, err := trimHeaders(nameHeaders...) + if err != nil { + return nil, err + } + trimmedGroupHeaders, err := trimHeaders(groupHeaders...) + if err != nil { + return nil, err + } + trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...) 
+ if err != nil { + return nil, err + } + + return NewDynamicVerifyOptionsSecure( + x509request.StaticVerifierFn(opts), + StaticStringSlice(proxyClientNames), + StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedGroupHeaders), + StaticStringSlice(trimmedExtraHeaderPrefixes), + ), nil +} + +func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes) + + return x509request.NewDynamicCAVerifier(verifyOptionFn, headerAuthenticator, proxyClientNames) } func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { - name := headerValue(req.Header, a.nameHeaders) + name := headerValue(req.Header, a.nameHeaders.Value()) if len(name) == 0 { return nil, false, nil } - groups := allHeaderValues(req.Header, a.groupHeaders) - extra := newExtra(req.Header, a.extraHeaderPrefixes) + groups := allHeaderValues(req.Header, a.groupHeaders.Value()) + extra := newExtra(req.Header, a.extraHeaderPrefixes.Value()) // clear headers used for authentication - for _, headerName := range a.nameHeaders { + for _, headerName := range a.nameHeaders.Value() { req.Header.Del(headerName) } - for _, headerName := range a.groupHeaders { + for _, headerName := range a.groupHeaders.Value() { req.Header.Del(headerName) } for k := range extra { - for _, prefix := range a.extraHeaderPrefixes { + for _, prefix := range a.extraHeaderPrefixes.Value() { req.Header.Del(prefix + k) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD index f75340b599d..dd875239cc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD @@ -27,6 +27,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "verify_options.go", "x509.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/request/x509", @@ -36,6 +37,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go new file mode 100644 index 00000000000..462eb4cc95f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go @@ -0,0 +1,71 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package x509 + +import ( + "crypto/x509" + "fmt" + + "k8s.io/client-go/util/cert" +) + +// StaticVerifierFn is a VerifyOptionFunc that always returns the same value. This allows verify options that cannot change. +func StaticVerifierFn(opts x509.VerifyOptions) VerifyOptionFunc { + return func() (x509.VerifyOptions, bool) { + return opts, true + } +} + +// NewStaticVerifierFromFile creates a new verification func from a file. It reads the content and then fails. +// It will return a nil function if you pass an empty CA file. +func NewStaticVerifierFromFile(clientCA string) (VerifyOptionFunc, error) { + if len(clientCA) == 0 { + return nil, nil + } + + // Wrap with an x509 verifier + var err error + opts := DefaultVerifyOptions() + opts.Roots, err = cert.NewPool(clientCA) + if err != nil { + return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err) + } + + return StaticVerifierFn(opts), nil +} + +// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places. +type StringSliceProvider interface { + // Value returns the current string slice. Callers should never mutate the returned value. + Value() []string +} + +// StringSliceProviderFunc is a function that matches the StringSliceProvider interface +type StringSliceProviderFunc func() []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (d StringSliceProviderFunc) Value() []string { + return d() +} + +// StaticStringSlice a StringSliceProvider that returns a fixed value +type StaticStringSlice []string + +// Value returns the current string slice. Callers should never mutate the returned value. +func (s StaticStringSlice) Value() []string { + return s +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index d45c863402c..da3ef45d26c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -82,16 +82,28 @@ func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Resp return f(chain) } +// VerifyOptionFunc is function which provides a shallow copy of the VerifyOptions to the authenticator. This allows +// for cases where the options (particularly the CAs) can change. If the bool is false, then the returned VerifyOptions +// are ignored and the authenticator will express "no opinion". This allows a clear signal for cases where a CertPool +// is eventually expected, but not currently present. 
+type VerifyOptionFunc func() (x509.VerifyOptions, bool) + // Authenticator implements request.Authenticator by extracting user info from verified client certificates type Authenticator struct { - opts x509.VerifyOptions - user UserConversion + verifyOptionsFn VerifyOptionFunc + user UserConversion } // New returns a request.Authenticator that verifies client certificates using the provided // VerifyOptions, and converts valid certificate chains into user.Info using the provided UserConversion func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { - return &Authenticator{opts, user} + return NewDynamic(StaticVerifierFn(opts), user) +} + +// NewDynamic returns a request.Authenticator that verifies client certificates using the provided +// VerifyOptionFunc (which may be dynamic), and converts valid certificate chains into user.Info using the provided UserConversion +func NewDynamic(verifyOptionsFn VerifyOptionFunc, user UserConversion) *Authenticator { + return &Authenticator{verifyOptionsFn, user} } // AuthenticateRequest authenticates the request using presented client certificates @@ -101,7 +113,11 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R } // Use intermediates, if provided - optsCopy := a.opts + optsCopy, ok := a.verifyOptionsFn() + // if there are intentionally no verify options, then we cannot authenticate this request + if !ok { + return nil, false, nil + } if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { optsCopy.Intermediates = x509.NewCertPool() for _, intermediate := range req.TLS.PeerCertificates[1:] { @@ -133,17 +149,23 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R // Verifier implements request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth type Verifier struct { - opts x509.VerifyOptions - auth authenticator.Request + verifyOptionsFn VerifyOptionFunc + auth authenticator.Request // allowedCommonNames contains the common names which a verified certificate is allowed to have. // If empty, all verified certificates are allowed. 
- allowedCommonNames sets.String + allowedCommonNames StringSliceProvider } // NewVerifier create a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCommonNames sets.String) authenticator.Request { - return &Verifier{opts, auth, allowedCommonNames} + return NewDynamicCAVerifier(StaticVerifierFn(opts), auth, StaticStringSlice(allowedCommonNames.List())) +} + +// NewDynamicCAVerifier create a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth +// TODO make the allowedCommonNames dynamic +func NewDynamicCAVerifier(verifyOptionsFn VerifyOptionFunc, auth authenticator.Request, allowedCommonNames StringSliceProvider) authenticator.Request { + return &Verifier{verifyOptionsFn, auth, allowedCommonNames} } // AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth @@ -153,7 +175,11 @@ func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Respon } // Use intermediates, if provided - optsCopy := a.opts + optsCopy, ok := a.verifyOptionsFn() + // if there are intentionally no verify options, then we cannot authenticate this request + if !ok { + return nil, false, nil + } if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { optsCopy.Intermediates = x509.NewCertPool() for _, intermediate := range req.TLS.PeerCertificates[1:] { @@ -172,12 +198,14 @@ func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Respon func (a *Verifier) verifySubject(subject pkix.Name) error { // No CN restrictions - if len(a.allowedCommonNames) == 0 { + if len(a.allowedCommonNames.Value()) == 0 { return nil } // Enforce CN restrictions - if a.allowedCommonNames.Has(subject.CommonName) { - return nil + for _, allowedCommonName := range a.allowedCommonNames.Value() { + if allowedCommonName == subject.CommonName { + return nil + } } return fmt.Errorf("x509: subject with cn=%s is not in the allowed list", subject.CommonName) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD index 66c09d44844..1f08a3a050f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD @@ -15,9 +15,10 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go index 18d5692d7a7..8e0520af169 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go @@ -19,20 +19,20 @@ package cache import ( "time" - lrucache "k8s.io/apimachinery/pkg/util/cache" + utilcache "k8s.io/apimachinery/pkg/util/cache" 
"k8s.io/apimachinery/pkg/util/clock" ) type simpleCache struct { - lru *lrucache.LRUExpireCache + cache *utilcache.Expiring } -func newSimpleCache(size int, clock clock.Clock) cache { - return &simpleCache{lru: lrucache.NewLRUExpireCacheWithClock(size, clock)} +func newSimpleCache(clock clock.Clock) cache { + return &simpleCache{cache: utilcache.NewExpiringWithClock(clock)} } func (c *simpleCache) get(key string) (*cacheRecord, bool) { - record, ok := c.lru.Get(key) + record, ok := c.cache.Get(key) if !ok { return nil, false } @@ -41,9 +41,9 @@ func (c *simpleCache) get(key string) (*cacheRecord, bool) { } func (c *simpleCache) set(key string, value *cacheRecord, ttl time.Duration) { - c.lru.Add(key, value, ttl) + c.cache.Set(key, value, ttl) } func (c *simpleCache) remove(key string) { - c.lru.Remove(key) + c.cache.Delete(key) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go index ea3853a38b4..ef0a8c87215 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -18,8 +18,15 @@ package cache import ( "context" - "fmt" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "hash" + "io" + "sync" "time" + "unsafe" utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/authentication/authenticator" @@ -40,6 +47,11 @@ type cachedTokenAuthenticator struct { failureTTL time.Duration cache cache + + // hashPool is a per authenticator pool of hash.Hash (to avoid allocations from building the Hash) + // HMAC with SHA-256 and a random key is used to prevent precomputation and length extension attacks + // It also mitigates hash map DOS attacks via collisions (the inputs are supplied by untrusted users) + hashPool *sync.Pool } type cache interface { @@ -57,12 +69,30 @@ func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureT } func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock utilclock.Clock) authenticator.Token { + randomCacheKey := make([]byte, 32) + if _, err := rand.Read(randomCacheKey); err != nil { + panic(err) // rand should never fail + } + return &cachedTokenAuthenticator{ authenticator: authenticator, cacheErrs: cacheErrs, successTTL: successTTL, failureTTL: failureTTL, - cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(128, clock) }), + // Cache performance degrades noticeably when the number of + // tokens in operation exceeds the size of the cache. It is + // cheap to make the cache big in the second dimension below, + // the memory is only consumed when that many tokens are being + // used. Currently we advertise support 5k nodes and 10k + // namespaces; a 32k entry cache is therefore a 2x safety + // margin. 
+ cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(clock) }), + + hashPool: &sync.Pool{ + New: func() interface{} { + return hmac.New(sha256.New, randomCacheKey) + }, + }, } } @@ -70,7 +100,7 @@ func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { auds, _ := authenticator.AudiencesFrom(ctx) - key := keyFunc(auds, token) + key := keyFunc(a.hashPool, auds, token) if record, ok := a.cache.get(key); ok { return record.resp, record.ok, record.err } @@ -90,6 +120,55 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token return resp, ok, err } -func keyFunc(auds []string, token string) string { - return fmt.Sprintf("%#v|%v", auds, token) +// keyFunc generates a string key by hashing the inputs. +// This lowers the memory requirement of the cache and keeps tokens out of memory. +func keyFunc(hashPool *sync.Pool, auds []string, token string) string { + h := hashPool.Get().(hash.Hash) + + h.Reset() + + // try to force stack allocation + var a [4]byte + b := a[:] + + writeLengthPrefixedString(h, b, token) + // encode the length of audiences to avoid ambiguities + writeLength(h, b, len(auds)) + for _, aud := range auds { + writeLengthPrefixedString(h, b, aud) + } + + key := toString(h.Sum(nil)) // skip base64 encoding to save an allocation + + hashPool.Put(h) + + return key +} + +// writeLengthPrefixedString writes s with a length prefix to prevent ambiguities, i.e. "xy" + "z" == "x" + "yz" +// the length of b is assumed to be 4 (b is mutated by this function to store the length of s) +func writeLengthPrefixedString(w io.Writer, b []byte, s string) { + writeLength(w, b, len(s)) + if _, err := w.Write(toBytes(s)); err != nil { + panic(err) // Write() on hash never fails + } +} + +// writeLength encodes length into b and then writes it via the given writer +// the length of b is assumed to be 4 +func writeLength(w io.Writer, b []byte, length int) { + binary.BigEndian.PutUint32(b, uint32(length)) + if _, err := w.Write(b); err != nil { + panic(err) // Write() on hash never fails + } +} + +// toBytes performs unholy acts to avoid allocations +func toBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&s)) +} + +// toString performs unholy acts to avoid allocations +func toString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index 5f212ca04eb..ce70710fa3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -17,6 +17,7 @@ limitations under the License. package authorizer import ( + "context" "net/http" "k8s.io/apiserver/pkg/authentication/user" @@ -67,12 +68,12 @@ type Attributes interface { // zero or more calls to methods of the Attributes interface. It returns nil when an action is // authorized, otherwise it returns an error. 
type Authorizer interface { - Authorize(a Attributes) (authorized Decision, reason string, err error) + Authorize(ctx context.Context, a Attributes) (authorized Decision, reason string, err error) } type AuthorizerFunc func(a Attributes) (Decision, string, error) -func (f AuthorizerFunc) Authorize(a Attributes) (Decision, string, error) { +func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, string, error) { return f(a) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD index b16931a0f41..06b8f73a5f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go index fc36bc0bc93..6fe3fa96ed8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go @@ -17,6 +17,7 @@ limitations under the License. package authorizerfactory import ( + "context" "errors" "k8s.io/apiserver/pkg/authentication/user" @@ -28,7 +29,7 @@ import ( // It is useful in tests and when using kubernetes in an open manner. type alwaysAllowAuthorizer struct{} -func (alwaysAllowAuthorizer) Authorize(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { +func (alwaysAllowAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { return authorizer.DecisionAllow, "", nil } @@ -56,7 +57,7 @@ func NewAlwaysAllowAuthorizer() *alwaysAllowAuthorizer { // It is useful in unit tests to force an operation to be forbidden. 
type alwaysDenyAuthorizer struct{} -func (alwaysDenyAuthorizer) Authorize(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { +func (alwaysDenyAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { return authorizer.DecisionNoOpinion, "Everything is forbidden.", nil } @@ -72,7 +73,7 @@ type privilegedGroupAuthorizer struct { groups []string } -func (r *privilegedGroupAuthorizer) Authorize(attr authorizer.Attributes) (authorizer.Decision, string, error) { +func (r *privilegedGroupAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (authorizer.Decision, string, error) { if attr.GetUser() == nil { return authorizer.DecisionNoOpinion, "Error", errors.New("no user on request.") } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go index c75c0a7552b..fa385e12554 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/plugin/pkg/authorizer/webhook" - authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" ) // DelegatingAuthorizerConfig is the minimal configuration needed to create an authenticator diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/union/union.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/union/union.go index 15753789ced..89d68ffed10 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/union/union.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/union/union.go @@ -25,6 +25,7 @@ limitations under the License. 
package union import ( + "context" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -41,14 +42,14 @@ func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { } // Authorizes against a chain of authorizer.Authorizer objects and returns nil if successful and returns error if unsuccessful -func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) { +func (authzHandler unionAuthzHandler) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { var ( errlist []error reasonlist []string ) for _, currAuthzHandler := range authzHandler { - decision, reason, err := currAuthzHandler.Authorize(a) + decision, reason, err := currAuthzHandler.Authorize(ctx, a) if err != nil { errlist = append(errlist, err) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/BUILD index 820223680b1..ceef2c63ee3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/BUILD @@ -22,6 +22,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD index d80a60ea96b..09824784fa7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD @@ -46,6 +46,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go index 81a13d64a00..2411a780d1c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go @@ -18,10 +18,12 @@ package discovery import ( "bytes" + "encoding/json" "fmt" "io" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog" ) const APIGroupPrefix = "/apis" @@ -36,9 +38,39 @@ func keepUnversioned(group string) bool { type stripVersionEncoder struct { encoder runtime.Encoder serializer runtime.Serializer + identifier runtime.Identifier +} + +func newStripVersionEncoder(e runtime.Encoder, s runtime.Serializer) runtime.Encoder { + return stripVersionEncoder{ + encoder: e, + serializer: s, + identifier: identifier(e), + } +} + +func identifier(e runtime.Encoder) runtime.Identifier { + result := map[string]string{ + "name": "stripVersion", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != 
nil { + klog.Fatalf("Failed marshaling identifier for stripVersionEncoder: %v", err) + } + return runtime.Identifier(identifier) } func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c stripVersionEncoder) doEncode(obj runtime.Object, w io.Writer) error { buf := bytes.NewBuffer([]byte{}) err := c.encoder.Encode(obj, buf) if err != nil { @@ -54,6 +86,11 @@ func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error { return c.serializer.Encode(roundTrippedObj, w) } +// Identifier implements runtime.Encoder interface. +func (c stripVersionEncoder) Identifier() runtime.Identifier { + return c.identifier +} + // stripVersionNegotiatedSerializer will return stripVersionEncoder when // EncoderForVersion is called. See comments for stripVersionEncoder. type stripVersionNegotiatedSerializer struct { @@ -69,5 +106,5 @@ func (n stripVersionNegotiatedSerializer) EncoderForVersion(encoder runtime.Enco panic(fmt.Sprintf("Unable to extract serializer from %#v", encoder)) } versioned := n.NegotiatedSerializer.EncoderForVersion(encoder, gv) - return stripVersionEncoder{versioned, serializer} + return newStripVersionEncoder(versioned, serializer) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD index 6dae2be189a..3ebc27f67f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -33,7 +33,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index 7e8f2baa50e..8fb5fa0bcef 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -20,6 +20,7 @@ import ( "errors" "net/http" "strings" + "time" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -58,8 +59,19 @@ var ( authenticatedAttemptsCounter = metrics.NewCounterVec( &metrics.CounterOpts{ - Name: "authentication_attempts", - Help: "Counter of authenticated attempts.", + Name: "authentication_attempts", + Help: "Counter of authenticated attempts.", + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) + + authenticationLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "authentication_duration_seconds", + Help: "Authentication duration in seconds broken out by result.", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, }, []string{"result"}, ) @@ -68,6 +80,7 @@ var ( func init() { legacyregistry.MustRegister(authenticatedUserCounter) legacyregistry.MustRegister(authenticatedAttemptsCounter) + legacyregistry.MustRegister(authenticationLatency) } // WithAuthentication creates an http 
handler that tries to authenticate the given request as a user, and then @@ -80,6 +93,8 @@ func WithAuthentication(handler http.Handler, auth authenticator.Request, failed return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + authenticationStart := time.Now() + if len(apiAuds) > 0 { req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds)) } @@ -88,16 +103,21 @@ func WithAuthentication(handler http.Handler, auth authenticator.Request, failed if err != nil { klog.Errorf("Unable to authenticate the request due to an error: %v", err) authenticatedAttemptsCounter.WithLabelValues(errorLabel).Inc() + authenticationLatency.WithLabelValues(errorLabel).Observe(time.Since(authenticationStart).Seconds()) } else if !ok { authenticatedAttemptsCounter.WithLabelValues(failureLabel).Inc() + authenticationLatency.WithLabelValues(failureLabel).Observe(time.Since(authenticationStart).Seconds()) } failed.ServeHTTP(w, req) return } - // TODO(mikedanese): verify the response audience matches one of apiAuds if - // non-empty + if len(apiAuds) > 0 && len(resp.Audiences) > 0 && len(authenticator.Audiences(apiAuds).Intersect(resp.Audiences)) == 0 { + klog.Errorf("Unable to match the audience: %v , accepted: %v", resp.Audiences, apiAuds) + failed.ServeHTTP(w, req) + return + } // authorization header is not required anymore in case of a successful authentication. req.Header.Del("Authorization") @@ -106,6 +126,7 @@ func WithAuthentication(handler http.Handler, auth authenticator.Request, failed authenticatedUserCounter.WithLabelValues(compressUsername(resp.User.GetName())).Inc() authenticatedAttemptsCounter.WithLabelValues(successLabel).Inc() + authenticationLatency.WithLabelValues(successLabel).Observe(time.Since(authenticationStart).Seconds()) handler.ServeHTTP(w, req) }) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index c6ab15b3d67..73bbe6b3fb7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -56,7 +56,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime. responsewriters.InternalError(w, req, err) return } - authorized, reason, err := a.Authorize(attributes) + authorized, reason, err := a.Authorize(ctx, attributes) // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. if authorized == authorizer.DecisionAllow { audit.LogAnnotation(ae, decisionAnnotationKey, decisionAllow) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index e71533cd261..a18b51ba00d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -109,7 +109,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. 
return } - decision, reason, err := a.Authorize(actingAsAttributes) + decision, reason, err := a.Authorize(ctx, actingAsAttributes) if err != nil || decision != authorizer.DecisionAllow { klog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 64d3b14d241..2b1e7c591b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -10,7 +10,9 @@ go_test( name = "go_default_test", srcs = [ "create_test.go", + "helpers_test.go", "namer_test.go", + "response_test.go", "rest_test.go", ], embed = [":go_default_library"], @@ -21,6 +23,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -33,11 +36,13 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/example:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/example/v1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/utils/trace:go_default_library", ], ) @@ -49,6 +54,7 @@ go_library( "delete.go", "doc.go", "get.go", + "helpers.go", "namer.go", "patch.go", "response.go", @@ -62,6 +68,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", @@ -73,6 +80,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", diff --git 
a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 6dc143a7b06..fa0d4a24120 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -27,7 +27,7 @@ import ( "unicode/utf8" "k8s.io/apimachinery/pkg/api/errors" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" @@ -46,7 +46,7 @@ import ( func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // For performance tracking purposes. - trace := utiltrace.New("Create", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("Create", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { @@ -99,7 +99,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int options := &metav1.CreateOptions{} values := req.URL.Query() - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index f84e62332ab..b1b0ad95b98 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" @@ -44,7 +45,7 @@ import ( func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // For performance tracking purposes. 
- trace := utiltrace.New("Delete", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("Delete", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { @@ -80,7 +81,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc return } if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversion.Codecs) + s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return @@ -88,7 +89,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") - obj, _, err := metainternalversion.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + obj, _, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return @@ -103,7 +104,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) trace.Step("Recorded the audit event") } else { - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return @@ -191,7 +192,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc } listOptions := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return @@ -240,7 +241,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc ae := request.AuditEventFrom(ctx) audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) } else { - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD index fc70f5759f9..dad87445a77 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD @@ -2,7 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( 
name = "go_default_library", - srcs = ["fieldmanager.go"], + srcs = [ + "buildmanagerinfo.go", + "capmanagers.go", + "fieldmanager.go", + "skipnonapplied.go", + "stripmeta.go", + "structuredmerge.go", + ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager", importpath = "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager", visibility = ["//visibility:public"], @@ -41,16 +48,30 @@ filegroup( go_test( name = "go_default_test", - srcs = ["fieldmanager_test.go"], + srcs = [ + "capmanagers_test.go", + "fieldmanager_test.go", + "skipnonapplied_test.go", + ], + data = [ + "endpoints.yaml", + "node.yaml", + "pod.yaml", + "//api/openapi-spec", + ], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto/testing:go_default_library", + "//vendor/sigs.k8s.io/structured-merge-diff/fieldpath:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go new file mode 100644 index 00000000000..450adad05d1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + } +} + +// Update implements Manager. 
+func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *buildManagerInfoManager) Apply(liveObj runtime.Object, patch []byte, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, patch, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return internal.BuildManagerIdentifier(&managerInfo) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go new file mode 100644 index 00000000000..8c38461613f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + "sort" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. 
+func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj runtime.Object, patch []byte, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, patch, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. +func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) { + // Gather all entries from updates + updaters := []string{} + for manager, fields := range managed.Fields() { + if fields.Applied() == false { + updaters = append(updaters, manager) + } + } + if len(updaters) <= f.maxUpdateManagers { + return managed, nil + } + + // If we have more than the maximum, sort the update entries by time, oldest first. + sort.Slice(updaters, func(i, j int) bool { + iTime, jTime, nTime := managed.Times()[updaters[i]], managed.Times()[updaters[j]], &metav1.Time{Time: time.Time{}} + if iTime == nil { + iTime = nTime + } + if jTime == nil { + jTime = nTime + } + if !iTime.Equal(jTime) { + return iTime.Before(jTime) + } + return updaters[i] < updaters[j] + }) + + // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap. + versionToFirstManager := map[string]string{} + for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ { + manager := updaters[i] + vs := managed.Fields()[manager] + time := managed.Times()[manager] + version := string(vs.APIVersion()) + + // Create a new manager identifier for the versioned bucket entry. + // The version for this manager comes from the version of the update being merged into the bucket. + bucket, err := internal.BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + Manager: f.oldUpdatesManagerName, + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: version, + }) + if err != nil { + return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err) + } + + // Merge the fieldsets if this is not the first time the version was seen. + // Otherwise just record the manager name in versionToFirstManager. + if first, ok := versionToFirstManager[version]; ok { + // If the bucket doesn't exist yet, create one. + if _, ok := managed.Fields()[bucket]; !ok { + s := managed.Fields()[first] + delete(managed.Fields(), first) + managed.Fields()[bucket] = s + } + + managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied()) + delete(managed.Fields(), manager) + length-- + + // Use the time from the update being merged into the bucket, since it is more recent.
+ managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml new file mode 100644 index 00000000000..a667e983426 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: 
'1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 
10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + 
namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0102 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0166 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0230 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0290 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0291 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0292 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0293 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0294 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0295 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0296 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0297 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0298 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0299 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0300 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0301 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0302 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0303 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0304 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0305 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0306 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0307 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0308 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0309 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0310 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0311 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0312 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0313 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0314 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0315 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0316 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0317 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0318 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0319 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0320 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0321 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0322 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0323 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0324 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0325 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0326 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0327 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0328 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0329 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0330 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0331 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0332 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0333 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0334 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0335 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0336 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0337 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0338 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0339 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0340 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0341 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0342 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0343 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0344 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0345 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0346 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0347 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0348 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0349 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0350 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0351 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0352 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0353 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0354 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0355 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0356 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0357 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0358 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0359 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0360 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0361 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0362 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0363 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0364 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0365 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0366 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0367 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0368 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0369 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0370 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0371 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0372 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0373 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0374 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0375 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0376 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0377 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0378 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0379 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0380 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0381 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0382 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0383 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0384 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0385 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0386 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0387 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0388 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0389 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0390 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0391 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0392 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0393 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0394 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0395 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0396 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0397 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0398 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0399 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0400 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0401 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0402 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0403 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0404 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0405 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0406 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0407 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0408 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0409 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0410 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0411 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0412 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0413 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0414 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0415 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0416 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0417 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0418 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0419 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0420 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0421 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0422 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0423 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0424 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0425 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0426 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0427 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0428 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0429 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0430 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0431 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0432 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0433 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0434 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0435 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0436 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0437 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0438 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0439 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0440 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0441 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0442 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0443 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0444 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0445 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0446 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0447 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0448 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0449 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0450 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0451 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0452 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0453 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0454 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0455 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0456 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0457 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0458 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0459 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0460 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0461 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0462 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0463 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0464 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0465 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0466 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0467 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0468 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0469 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0470 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0471 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0472 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0473 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0474 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0475 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0476 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0477 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0478 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0479 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0480 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0481 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0482 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0483 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0484 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0485 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0486 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0487 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0488 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0489 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0490 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0491 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0492 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0493 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0494 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0495 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0496 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0497 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0498 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0499 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0500 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0501 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0502 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0503 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0504 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0505 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0506 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0507 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0508 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0509 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0510 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0511 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0512 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0513 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0514 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0515 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0516 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0517 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0518 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0519 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0520 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0521 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0522 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0523 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0524 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0525 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0526 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0527 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0528 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0529 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0530 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0531 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0532 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0533 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0534 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0535 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0536 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0537 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0538 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0539 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0540 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0541 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0542 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0543 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0544 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0545 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0546 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0547 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0548 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0549 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0550 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0551 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0552 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0553 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0554 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0555 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0556 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0557 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0558 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0559 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0560 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0561 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0562 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0563 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0564 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0565 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0566 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0567 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0568 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0569 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0570 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0571 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0572 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0573 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0574 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0575 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0576 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0577 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0578 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0579 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0580 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0581 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0582 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0583 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0584 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0585 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0586 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0587 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0588 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0589 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0590 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0591 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0592 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0593 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0594 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0595 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0596 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0597 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0598 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0599 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0600 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0601 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0602 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0603 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0604 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0605 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0606 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0607 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0608 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0609 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0610 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0611 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0612 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0613 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0614 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0615 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0616 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0617 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0618 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0619 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0620 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0621 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0622 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0623 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0624 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0625 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0626 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0627 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0628 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0629 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0630 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0631 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0632 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0633 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0634 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0635 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0636 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0637 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0638 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0639 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0640 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0641 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0642 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0643 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0644 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0645 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0646 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0647 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0648 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0649 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0650 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0651 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0652 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0653 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0654 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0655 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0656 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0657 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0658 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0659 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0660 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0661 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0662 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0663 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0664 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0665 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0666 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0667 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0668 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0669 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0670 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0671 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0672 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0673 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0674 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0675 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0676 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0677 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0678 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0679 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0680 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0681 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0682 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0683 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0684 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0685 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0686 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0687 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0688 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0689 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0690 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0691 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0692 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0693 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0694 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0695 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0696 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0697 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0698 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0699 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0700 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0701 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0702 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0703 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0704 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0705 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0706 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0707 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0708 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0709 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0710 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0711 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0712 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0713 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0714 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0715 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0716 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0717 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0718 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0719 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0720 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0721 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0722 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0723 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0724 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0725 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0726 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0727 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0728 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0729 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0730 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0731 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0732 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0733 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0734 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0735 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0736 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0737 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0738 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0739 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0740 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0741 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0742 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0743 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0744 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0745 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0746 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0747 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0748 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0749 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0750 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0751 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0752 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0753 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0754 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0755 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0756 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0757 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0758 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0759 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0760 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0761 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0762 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0763 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0764 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0765 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0766 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0767 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0768 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0769 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0770 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0771 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0772 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0773 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0774 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0775 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0776 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0777 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0778 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0779 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0780 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0781 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0782 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0783 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0784 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0802 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0806 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0870 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0934 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0998 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index 54a4eae00f6..3f2b08984ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -18,309 +18,144 @@ package fieldmanager import ( "fmt" - "time" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" - "k8s.io/klog" openapiproto "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/fieldpath" - "sigs.k8s.io/structured-merge-diff/merge" - "sigs.k8s.io/yaml" ) +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + Apply(liveObj runtime.Object, patch []byte, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} + // FieldManager updates the managed fields and merge applied // configurations. type FieldManager struct { - typeConverter internal.TypeConverter - objectConverter runtime.ObjectConvertor - objectDefaulter runtime.ObjectDefaulter - groupVersion schema.GroupVersion - hubVersion schema.GroupVersion - updater merge.Updater + fieldManager Manager +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. +func NewFieldManager(f Manager) *FieldManager { + return &FieldManager{f} } -// NewFieldManager creates a new FieldManager that merges apply requests +// NewDefaultFieldManager creates a new FieldManager that merges apply requests // and update managed fields for other types of requests. 
-func NewFieldManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (*FieldManager, error) { - typeConverter, err := internal.NewTypeConverter(models, false) +func NewDefaultFieldManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion) (*FieldManager, error) { + f, err := NewStructuredMergeManager(models, objectConverter, objectDefaulter, kind.GroupVersion(), hub) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create field manager: %v", err) } - - return &FieldManager{ - typeConverter: typeConverter, - objectConverter: objectConverter, - objectDefaulter: objectDefaulter, - groupVersion: gv, - hubVersion: hub, - updater: merge.Updater{ - Converter: internal.NewVersionConverter(typeConverter, objectConverter, hub), - }, - }, nil + return newDefaultFieldManager(f, objectCreater, kind), nil } -// NewCRDFieldManager creates a new FieldManager specifically for +// NewDefaultCRDFieldManager creates a new FieldManager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. -func NewCRDFieldManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, preserveUnknownFields bool) (_ *FieldManager, err error) { - var typeConverter internal.TypeConverter = internal.DeducedTypeConverter{} - if models != nil { - typeConverter, err = internal.NewTypeConverter(models, preserveUnknownFields) - if err != nil { - return nil, err - } +func NewDefaultCRDFieldManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, preserveUnknownFields bool) (_ *FieldManager, err error) { + f, err := NewCRDStructuredMergeManager(models, objectConverter, objectDefaulter, kind.GroupVersion(), hub, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) } - return &FieldManager{ - typeConverter: typeConverter, - objectConverter: objectConverter, - objectDefaulter: objectDefaulter, - groupVersion: gv, - hubVersion: hub, - updater: merge.Updater{ - Converter: internal.NewCRDVersionConverter(typeConverter, objectConverter, hub), - }, - }, nil + return newDefaultFieldManager(f, objectCreater, kind), nil +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func newDefaultFieldManager(f Manager, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind) *FieldManager { + f = NewStripMetaManager(f) + f = NewBuildManagerInfoManager(f, kind.GroupVersion()) + f = NewCapManagersManager(f, DefaultMaxUpdateManagers) + f = NewSkipNonAppliedManager(f, objectCreater, kind) + return NewFieldManager(f) } // Update is used when the object has already been merged (non-apply // use-case), and simply updates the managed fields in the output // object. 
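For orientation: the refactor above splits the old monolithic FieldManager into a small Manager interface plus a chain of wrappers (strip-meta, manager-info, cap-managers, skip-non-applied) composed in newDefaultFieldManager. The sketch below is a deliberately simplified, standalone illustration of that decorator composition; the manager, core and stripMeta types and the string-based state are invented for the example and are not the vendored types.

package main

import "fmt"

// manager is a reduced stand-in for the Manager interface introduced above:
// each layer receives the object plus the managed-field state and may adjust both.
type manager interface {
	Update(obj string, managed map[string]string, owner string) (string, map[string]string, error)
}

// core plays the role of the innermost layer (the structured-merge manager in the real code).
type core struct{}

func (core) Update(obj string, managed map[string]string, owner string) (string, map[string]string, error) {
	managed[owner] = "fields touched by " + owner
	return obj, managed, nil
}

// stripMeta mimics a decorator that post-processes the managed-field state returned by the inner layer.
type stripMeta struct{ next manager }

func (s stripMeta) Update(obj string, managed map[string]string, owner string) (string, map[string]string, error) {
	obj, managed, err := s.next.Update(obj, managed, owner)
	if err != nil {
		return "", nil, err
	}
	delete(managed, "metadata") // drop entries the wrapper never wants to record
	return obj, managed, nil
}

func main() {
	var m manager = core{}
	m = stripMeta{next: m} // layers compose outside-in, as in newDefaultFieldManager
	obj, managed, _ := m.Update("pod", map[string]string{"metadata": "x"}, "kubectl")
	fmt.Println(obj, managed)
}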
-func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (runtime.Object, error) { +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { // If the object doesn't have metadata, we should just return without trying to // set the managedFields at all, so creates/updates/patches will work normally. - if _, err := meta.Accessor(newObj); err != nil { + if _, err = meta.Accessor(newObj); err != nil { return newObj, nil } // First try to decode the managed fields provided in the update, // This is necessary to allow directly updating managed fields. - managed, err := internal.DecodeObjectManagedFields(newObj) - - // If the managed field is empty or we failed to decode it, - // let's try the live object. This is to prevent clients who - // don't understand managedFields from deleting it accidentally. - if err != nil || len(managed.Fields) == 0 { + var managed Managed + if managed, err = internal.DecodeObjectManagedFields(newObj); err != nil || len(managed.Fields()) == 0 { + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. managed, err = internal.DecodeObjectManagedFields(liveObj) if err != nil { return nil, fmt.Errorf("failed to decode managed fields: %v", err) } } - // if managed field is still empty, skip updating managed fields altogether - if len(managed.Fields) == 0 { - return newObj, nil - } - newObjVersioned, err := f.toVersioned(newObj) - if err != nil { - return nil, fmt.Errorf("failed to convert new object to proper version: %v", err) - } - liveObjVersioned, err := f.toVersioned(liveObj) - if err != nil { - return nil, fmt.Errorf("failed to convert live object to proper version: %v", err) - } - internal.RemoveObjectManagedFields(liveObjVersioned) - internal.RemoveObjectManagedFields(newObjVersioned) - newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) - if err != nil { - // Return newObj and just by-pass fields update. This really shouldn't happen. - klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed new object: %v", err) - return newObj, nil - } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) - if err != nil { - // Return newObj and just by-pass fields update. This really shouldn't happen. 
- klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed live object: %v", err) - return newObj, nil - } - apiVersion := fieldpath.APIVersion(f.groupVersion.String()) - - // TODO(apelisse) use the first return value when unions are implemented - _, managed.Fields, err = f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields, manager) - if err != nil { - return nil, fmt.Errorf("failed to update ManagedFields: %v", err) - } - managed.Fields = f.stripFields(managed.Fields, manager) - // If the current operation took any fields from anything, it means the object changed, - // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager - if vs, ok := managed.Fields[manager]; ok { - delete(managed.Fields, manager) + internal.RemoveObjectManagedFields(liveObj) + internal.RemoveObjectManagedFields(newObj) - // Build a manager identifier which will only match previous updates from the same manager - manager, err = f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) - if err != nil { - return nil, fmt.Errorf("failed to build manager identifier: %v", err) - } - - managed.Times[manager] = &metav1.Time{Time: time.Now().UTC()} - if previous, ok := managed.Fields[manager]; ok { - managed.Fields[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) - } else { - managed.Fields[manager] = vs - } + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err } - if err := internal.EncodeObjectManagedFields(newObj, managed); err != nil { + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { return nil, fmt.Errorf("failed to encode managed fields: %v", err) } - return newObj, nil + return object, nil } // Apply is used when server-side apply is called, as it merges the -// object and update the managed fields. -func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, fieldManager string, force bool) (runtime.Object, error) { +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, manager string, force bool) (object runtime.Object, err error) { // If the object doesn't have metadata, apply isn't allowed. - accessor, err := meta.Accessor(liveObj) - if err != nil { + if _, err = meta.Accessor(liveObj); err != nil { return nil, fmt.Errorf("couldn't get accessor: %v", err) } - missingManagedFields := (len(accessor.GetManagedFields()) == 0) - managed, err := internal.DecodeObjectManagedFields(liveObj) - if err != nil { + // Decode the managed fields in the live object, since it isn't allowed in the patch. + var managed Managed + if managed, err = internal.DecodeObjectManagedFields(liveObj); err != nil { return nil, fmt.Errorf("failed to decode managed fields: %v", err) } - // Check that the patch object has the same version as the live object - patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} - if err := yaml.Unmarshal(patch, &patchObj.Object); err != nil { - return nil, fmt.Errorf("error decoding YAML: %v", err) - } - if patchObj.GetAPIVersion() != f.groupVersion.String() { - return nil, - errors.NewBadRequest( - fmt.Sprintf("Incorrect version specified in apply patch. 
"+ - "Specified patch version: %s, expected: %s", - patchObj.GetAPIVersion(), f.groupVersion.String())) - } + internal.RemoveObjectManagedFields(liveObj) - liveObjVersioned, err := f.toVersioned(liveObj) - if err != nil { - return nil, fmt.Errorf("failed to convert live object to proper version: %v", err) - } - internal.RemoveObjectManagedFields(liveObjVersioned) - - patchObjTyped, err := f.typeConverter.YAMLToTyped(patch) - if err != nil { - return nil, fmt.Errorf("failed to create typed patch object: %v", err) - } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) - if err != nil { - return nil, fmt.Errorf("failed to create typed live object: %v", err) - } - manager, err := f.buildManagerInfo(fieldManager, metav1.ManagedFieldsOperationApply) - if err != nil { - return nil, fmt.Errorf("failed to build manager identifier: %v", err) - } - - apiVersion := fieldpath.APIVersion(f.groupVersion.String()) - // if managed field is missing, create a single entry for all the fields - if missingManagedFields { - unknownManager, err := internal.BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ - Manager: "before-first-apply", - Operation: metav1.ManagedFieldsOperationUpdate, - APIVersion: f.groupVersion.String(), - }) - if err != nil { - return nil, fmt.Errorf("failed to create manager for existing fields: %v", err) - } - unknownFieldSet, err := liveObjTyped.ToFieldSet() - if err != nil { - return nil, fmt.Errorf("failed to create fieldset for existing fields: %v", err) - } - managed.Fields[unknownManager] = fieldpath.NewVersionedSet(unknownFieldSet, apiVersion, false) - f.stripFields(managed.Fields, unknownManager) - } - newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields, manager, force) - if err != nil { - if conflicts, ok := err.(merge.Conflicts); ok { - return nil, internal.NewConflictError(conflicts) - } + if object, managed, err = f.fieldManager.Apply(liveObj, patch, managed, manager, force); err != nil { return nil, err } - managed.Fields = f.stripFields(managedFields, manager) - // Update the time in the managedFieldsEntry for this operation - managed.Times[manager] = &metav1.Time{Time: time.Now().UTC()} - - newObj, err := f.typeConverter.TypedToObject(newObjTyped) - if err != nil { - return nil, fmt.Errorf("failed to convert new typed object to object: %v", err) - } - - if err := internal.EncodeObjectManagedFields(newObj, managed); err != nil { + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { return nil, fmt.Errorf("failed to encode managed fields: %v", err) } - newObjVersioned, err := f.toVersioned(newObj) - if err != nil { - return nil, fmt.Errorf("failed to convert new object to proper version: %v", err) - } - f.objectDefaulter.Default(newObjVersioned) - - newObjUnversioned, err := f.toUnversioned(newObjVersioned) - if err != nil { - return nil, fmt.Errorf("failed to convert to unversioned: %v", err) - } - return newObjUnversioned, nil -} - -func (f *FieldManager) toVersioned(obj runtime.Object) (runtime.Object, error) { - return f.objectConverter.ConvertToVersion(obj, f.groupVersion) -} - -func (f *FieldManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { - return f.objectConverter.ConvertToVersion(obj, f.hubVersion) -} - -func (f *FieldManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { - managerInfo := metav1.ManagedFieldsEntry{ - Manager: prefix, - Operation: operation, - APIVersion: f.groupVersion.String(), - } - 
if managerInfo.Manager == "" { - managerInfo.Manager = "unknown" - } - return internal.BuildManagerIdentifier(&managerInfo) -} - -// stripSet is the list of fields that should never be part of a mangedFields. -var stripSet = fieldpath.NewSet( - fieldpath.MakePathOrDie("apiVersion"), - fieldpath.MakePathOrDie("kind"), - fieldpath.MakePathOrDie("metadata"), - fieldpath.MakePathOrDie("metadata", "name"), - fieldpath.MakePathOrDie("metadata", "namespace"), - fieldpath.MakePathOrDie("metadata", "creationTimestamp"), - fieldpath.MakePathOrDie("metadata", "selfLink"), - fieldpath.MakePathOrDie("metadata", "uid"), - fieldpath.MakePathOrDie("metadata", "clusterName"), - fieldpath.MakePathOrDie("metadata", "generation"), - fieldpath.MakePathOrDie("metadata", "managedFields"), - fieldpath.MakePathOrDie("metadata", "resourceVersion"), -) - -// stripFields removes a predefined set of paths found in typed from managed and returns the updated ManagedFields -func (f *FieldManager) stripFields(managed fieldpath.ManagedFields, manager string) fieldpath.ManagedFields { - vs, ok := managed[manager] - if ok { - if vs == nil { - panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) - } - newSet := vs.Set().Difference(stripSet) - if newSet.Empty() { - delete(managed, manager) - } else { - managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) - } - } - - return managed + return object, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD index 6b57a3484b2..3d1ad13d990 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD @@ -27,7 +27,6 @@ go_library( "//vendor/sigs.k8s.io/structured-merge-diff/merge:go_default_library", "//vendor/sigs.k8s.io/structured-merge-diff/typed:go_default_library", "//vendor/sigs.k8s.io/structured-merge-diff/value:go_default_library", - "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go index 1f0eb6d3a44..74e30a4e9cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go @@ -28,10 +28,38 @@ import ( "sigs.k8s.io/structured-merge-diff/fieldpath" ) -// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. -type Managed struct { - Fields fieldpath.ManagedFields - Times map[string]*metav1.Time +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. 
+func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } } // RemoveObjectManagedFields removes the ManagedFields from the object @@ -46,9 +74,9 @@ func RemoveObjectManagedFields(obj runtime.Object) { } // DecodeObjectManagedFields extracts and converts the objects ManagedFields into a fieldpath.ManagedFields. -func DecodeObjectManagedFields(from runtime.Object) (Managed, error) { +func DecodeObjectManagedFields(from runtime.Object) (ManagedInterface, error) { if from == nil { - return Managed{}, nil + return &managedStruct{}, nil } accessor, err := meta.Accessor(from) if err != nil { @@ -57,13 +85,13 @@ func DecodeObjectManagedFields(from runtime.Object) (Managed, error) { managed, err := decodeManagedFields(accessor.GetManagedFields()) if err != nil { - return Managed{}, fmt.Errorf("failed to convert managed fields from API: %v", err) + return nil, fmt.Errorf("failed to convert managed fields from API: %v", err) } - return managed, err + return &managed, nil } // EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields -func EncodeObjectManagedFields(obj runtime.Object, managed Managed) error { +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { accessor, err := meta.Accessor(obj) if err != nil { panic(fmt.Sprintf("couldn't get accessor: %v", err)) @@ -80,19 +108,19 @@ func EncodeObjectManagedFields(obj runtime.Object, managed Managed) error { // decodeManagedFields converts ManagedFields from the wire format (api format) // to the format used by sigs.k8s.io/structured-merge-diff -func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed Managed, err error) { - managed.Fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) - managed.Times = make(map[string]*metav1.Time, len(encodedManagedFields)) +func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed managedStruct, err error) { + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) for _, encodedVersionedSet := range encodedManagedFields { manager, err := BuildManagerIdentifier(&encodedVersionedSet) if err != nil { - return Managed{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + return managedStruct{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) } - managed.Fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) if err != nil { - return Managed{}, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + return managedStruct{}, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) } - managed.Times[manager] = encodedVersionedSet.Time + managed.times[manager] = encodedVersionedSet.Time } return managed, nil } @@ -139,15 +167,18 @@ func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (version // encodeManagedFields converts ManagedFields from the format used by // 
sigs.k8s.io/structured-merge-diff to the wire format (api format) -func encodeManagedFields(managed Managed) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } encodedManagedFields = []metav1.ManagedFieldsEntry{} - for manager := range managed.Fields { - versionedSet := managed.Fields[manager] + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] v, err := encodeManagerVersionedSet(manager, versionedSet) if err != nil { return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) } - if t, ok := managed.Times[manager]; ok { + if t, ok := managed.Times()[manager]; ok { v.Time = t } encodedManagedFields = append(encodedManagedFields, *v) @@ -174,7 +205,10 @@ func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) return p.Time.Before(q.Time) } - return p.Manager < q.Manager + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + return p.APIVersion < q.APIVersion }) return encodedManagedFields, nil diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go index 62f41507dcf..087b7636a00 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go @@ -77,7 +77,7 @@ func NewPathElement(s string) (fieldpath.PathElement, error) { if err != nil { return fieldpath.PathElement{}, err } - fields := []value.Field{} + fields := value.FieldList{} for k, v := range kv { b, err := json.Marshal(v) if err != nil { @@ -94,7 +94,7 @@ func NewPathElement(s string) (fieldpath.PathElement, error) { }) } return fieldpath.PathElement{ - Key: &value.Map{Items: fields}, + Key: &fields, }, nil default: // Ignore unknown key types @@ -109,7 +109,7 @@ func PathElementString(pe fieldpath.PathElement) (string, error) { return Field + Separator + *pe.FieldName, nil case pe.Key != nil: kv := map[string]json.RawMessage{} - for _, k := range pe.Key.Items { + for _, k := range *pe.Key { b, err := k.Value.ToJSON() if err != nil { return "", err diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go index 44ad55916fa..6d10d31d0a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go @@ -25,14 +25,12 @@ import ( "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/typed" "sigs.k8s.io/structured-merge-diff/value" - "sigs.k8s.io/yaml" ) // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. 
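One behavioural detail in the managedfields.go hunk above: encoded managedFields entries are now sorted with an extra tie-breaker, first by timestamp, then by manager name, then by API version, so the output order is fully deterministic. A self-contained illustration of that multi-key comparator with sort.Slice follows; the entry struct and integer timestamps are placeholders, not metav1.ManagedFieldsEntry.

package main

import (
	"fmt"
	"sort"
)

// entry stands in for metav1.ManagedFieldsEntry with just the three sort keys.
type entry struct {
	Time       int // placeholder for the operation timestamp
	Manager    string
	APIVersion string
}

func main() {
	entries := []entry{
		{1, "kubectl", "v1"},
		{1, "kubectl", "apps/v1"},
		{1, "controller", "v1"},
		{0, "kubectl", "v1"},
	}
	sort.Slice(entries, func(i, j int) bool {
		p, q := entries[i], entries[j]
		if p.Time != q.Time {
			return p.Time < q.Time // oldest operation first
		}
		if p.Manager != q.Manager {
			return p.Manager < q.Manager // then by manager name
		}
		return p.APIVersion < q.APIVersion // final tie-breaker keeps the order deterministic
	})
	fmt.Println(entries)
}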
type TypeConverter interface { ObjectToTyped(runtime.Object) (*typed.TypedValue, error) - YAMLToTyped([]byte) (*typed.TypedValue, error) TypedToObject(*typed.TypedValue) (runtime.Object, error) } @@ -58,11 +56,6 @@ func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue return typed.DeducedParseableType.FromUnstructured(u) } -// YAMLToTyped parses a yaml object into a TypedValue with a "deduced type". -func (DeducedTypeConverter) YAMLToTyped(from []byte) (*typed.TypedValue, error) { - return typed.DeducedParseableType.FromYAML(typed.YAMLObject(from)) -} - // TypedToObject transforms the typed value into a runtime.Object. That // is not specific to deduced type. func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { @@ -99,21 +92,6 @@ func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, er return t.FromUnstructured(u) } -func (c *typeConverter) YAMLToTyped(from []byte) (*typed.TypedValue, error) { - unstructured := &unstructured.Unstructured{Object: map[string]interface{}{}} - - if err := yaml.Unmarshal(from, &unstructured.Object); err != nil { - return nil, fmt.Errorf("error decoding YAML: %v", err) - } - - gvk := unstructured.GetObjectKind().GroupVersionKind() - t := c.parser.Type(gvk) - if t == nil { - return nil, newNoCorrespondingTypeError(gvk) - } - return t.FromYAML(typed.YAMLObject(string(from))) -} - func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { return valueToObject(value.AsValue()) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml new file mode 100644 index 00000000000..e04393811e7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml @@ -0,0 +1,259 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + beta.kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: 
"2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. 
AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - k8s.gcr.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - k8s.gcr.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - k8s.gcr.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - k8s.gcr.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - k8s.gcr.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - k8s.gcr.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - k8s.gcr.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - k8s.gcr.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - k8s.gcr.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - k8s.gcr.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - k8s.gcr.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - k8s.gcr.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - k8s.gcr.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - k8s.gcr.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - k8s.gcr.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - k8s.gcr.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - k8s.gcr.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - k8s.gcr.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - k8s.gcr.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - k8s.gcr.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - k8s.gcr.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + 
- k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - k8s.gcr.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - k8s.gcr.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - k8s.gcr.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - k8s.gcr.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - k8s.gcr.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - k8s.gcr.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml new file mode 100644 index 00000000000..3fb0877d67c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + 
uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go new file mode 100644 index 00000000000..4230a61d644 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + gvk schema.GroupVersionKind + beforeApplyManagerName string +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + gvk: gvk, + beforeApplyManagerName: "before-first-apply", + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + return newObj, managed, nil + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj runtime.Object, patch []byte, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + emptyObj, err := f.objectCreater.New(f.gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, patch, managed, fieldManager, force) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go new file mode 100644 index 00000000000..64457a0e2fe --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. 
+func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *stripMetaManager) Apply(liveObj runtime.Object, patch []byte, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, patch, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go new file mode 100644 index 00000000000..93fcc2dad11 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldmanager + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/klog" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/fieldpath" + "sigs.k8s.io/structured-merge-diff/merge" + "sigs.k8s.io/yaml" +) + +type structuredMergeManager struct { + typeConverter internal.TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (Manager, error) { + typeConverter, err := internal.NewTypeConverter(models, false) + if err != nil { + return nil, err + } + + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: internal.NewVersionConverter(typeConverter, objectConverter, hub), + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(models openapiproto.Models, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, preserveUnknownFields bool) (_ Manager, err error) { + var typeConverter internal.TypeConverter = internal.DeducedTypeConverter{} + if models != nil { + typeConverter, err = internal.NewTypeConverter(models, preserveUnknownFields) + if err != nil { + return nil, err + } + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: internal.NewCRDVersionConverter(typeConverter, objectConverter, hub), + }, + }, nil +} + +// Update implements Manager. +func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + // Return newObj and just by-pass fields update. This really shouldn't happen. + klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed new object: %v", err) + return newObj, managed, nil + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + // Return newObj and just by-pass fields update. This really shouldn't happen. 
+ klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed live object: %v", err) + return newObj, managed, nil + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + self := "current-operation" + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), self) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields: %v", err) + } + managed = internal.NewManaged(managedFields, managed.Times()) + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + } + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj runtime.Object, patch []byte, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(patch, &patchObj.Object); err != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err)) + } + + // Check that the patch object has the same version as the live object + if patchObj.GetAPIVersion() != f.groupVersion.String() { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchObj.GetAPIVersion(), f.groupVersion.String())) + } + + if patchObj.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("metadata.managedFields must be nil")) + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object: %v", err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object: %v", err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, nil, internal.NewConflictError(conflicts) + } + return nil, nil, err + } + managed = internal.NewManaged(managedFields, managed.Times()) + + // Update the time in the managedFieldsEntry for this operation + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object to object: %v", err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned: %v", err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index 425ba323a24..3c51318e8ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" @@ -48,7 +49,7 @@ type getterFunc func(ctx context.Context, name string, req *http.Request, trace // passed-in getterFunc to perform the actual get. 
func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - trace := utiltrace.New("Get", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("Get", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) defer trace.LogIfLong(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) @@ -85,7 +86,7 @@ func GetResource(r rest.Getter, e rest.Exporter, scope *RequestScope) http.Handl options := metav1.GetOptions{} if values := req.URL.Query(); len(values) > 0 { exports := metav1.ExportOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { err = errors.NewBadRequest(err.Error()) return nil, err } @@ -95,7 +96,7 @@ func GetResource(r rest.Getter, e rest.Exporter, scope *RequestScope) http.Handl } return e.Export(ctx, name, exports) } - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { err = errors.NewBadRequest(err.Error()) return nil, err } @@ -165,7 +166,7 @@ func getRequestOptions(req *http.Request, scope *RequestScope, into runtime.Obje func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // For performance tracking purposes. - trace := utiltrace.New("List", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("List", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) namespace, err := scope.Namer.Namespace(req) if err != nil { @@ -191,7 +192,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc } opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go new file mode 100644 index 00000000000..82170e050ec --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +const ( + maxUserAgentLength = 1024 + userAgentTruncateSuffix = "...TRUNCATED" +) + +// lazyTruncatedUserAgent implements String() string and it will +// return user-agent which may be truncated. +type lazyTruncatedUserAgent struct { + req *http.Request +} + +func (lazy *lazyTruncatedUserAgent) String() string { + ua := "unknown" + if lazy.req != nil { + ua = utilnet.GetHTTPClient(lazy.req) + if len(ua) > maxUserAgentLength { + ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix + } + } + return ua +} + +// LazyClientIP implements String() string and it will +// calls GetClientIP() lazily only when required. +type lazyClientIP struct { + req *http.Request +} + +func (lazy *lazyClientIP) String() string { + if lazy.req != nil { + if ip := utilnet.GetClientIP(lazy.req); ip != nil { + return ip.String() + } + } + return "unknown" +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 718d61e676b..9dbad1ea65c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -234,13 +234,6 @@ func acceptMediaTypeOptions(params map[string]string, accepts *runtime.Serialize return options, true } -type candidateMediaType struct { - accepted *runtime.SerializerInfo - clauses goautoneg.Accept -} - -type candidateMediaTypeSlice []candidateMediaType - // NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and // a list of alternatives along with the accepted media type parameters. 
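The helpers.go file added above feeds the user-agent and client IP into traces as lazily evaluated fmt.Stringer values, so the header lookup and truncation only run if a slow trace is actually logged. A minimal standalone sketch of the same pattern follows; the type name and the length limit here are illustrative, not the vendored code.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const maxLen = 16 // illustrative limit; the vendored helper uses 1024

// lazyUserAgent mimics lazyTruncatedUserAgent: the header is read and
// truncated only if String() is actually invoked by a formatter.
type lazyUserAgent struct{ req *http.Request }

func (l *lazyUserAgent) String() string {
	if l.req == nil {
		return "unknown"
	}
	ua := l.req.UserAgent()
	if len(ua) > maxLen {
		ua = ua[:maxLen] + "...TRUNCATED"
	}
	return ua
}

func main() {
	req := httptest.NewRequest("GET", "/api/v1/pods", nil)
	req.Header.Set("User-Agent", "kubectl/v1.17.0 (linux/amd64) kubernetes/abcdef0")

	// Constructing the field is cheap; the User-Agent header is only touched
	// when %s forces String() below (i.e. when the line is actually emitted).
	fmt.Printf("GET url=%s user-agent=%s\n", req.URL.Path, &lazyUserAgent{req: req})
}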
func NegotiateMediaTypeOptions(header string, accepted []runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { @@ -250,25 +243,21 @@ func NegotiateMediaTypeOptions(header string, accepted []runtime.SerializerInfo, }, true } - var candidates candidateMediaTypeSlice clauses := goautoneg.ParseAccept(header) - for _, clause := range clauses { + for i := range clauses { + clause := &clauses[i] for i := range accepted { accepts := &accepted[i] switch { case clause.Type == accepts.MediaTypeType && clause.SubType == accepts.MediaTypeSubType, clause.Type == accepts.MediaTypeType && clause.SubType == "*", clause.Type == "*" && clause.SubType == "*": - candidates = append(candidates, candidateMediaType{accepted: accepts, clauses: clause}) + if retVal, ret := acceptMediaTypeOptions(clause.Params, accepts, endpoint); ret { + return retVal, true + } } } } - for _, v := range candidates { - if retVal, ret := acceptMediaTypeOptions(v.clauses.Params, v.accepted, endpoint); ret { - return retVal, true - } - } - return MediaTypeOptions{}, false } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 10d5f032dce..4893578f1c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -26,7 +26,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" @@ -59,7 +59,7 @@ const ( func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interface, patchTypes []string) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // For performance tracking purposes. 
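The negotiate.go rewrite above drops the intermediate candidate slice and returns as soon as an Accept clause, taken in header order, matches a supported serializer, including the type/* and */* wildcards. A self-contained sketch of that first-match strategy, with simplified stand-in structs and the media-type parameter handling omitted:

package main

import "fmt"

type clause struct{ Type, SubType string }       // parsed Accept clauses, in preference order
type serializer struct{ MediaTypeType, MediaTypeSubType string }

// negotiate returns the first supported serializer matched by the ordered
// clauses, honoring "type/*" and "*/*" wildcards, mirroring the rewritten loop.
func negotiate(clauses []clause, accepted []serializer) (serializer, bool) {
	for _, c := range clauses {
		for _, a := range accepted {
			switch {
			case c.Type == a.MediaTypeType && c.SubType == a.MediaTypeSubType,
				c.Type == a.MediaTypeType && c.SubType == "*",
				c.Type == "*" && c.SubType == "*":
				return a, true // first acceptable match wins
			}
		}
	}
	return serializer{}, false
}

func main() {
	accepted := []serializer{{"application", "json"}, {"application", "yaml"}}
	clauses := []clause{{"application", "vnd.custom"}, {"application", "*"}}
	if s, ok := negotiate(clauses, accepted); ok {
		fmt.Printf("negotiated %s/%s\n", s.MediaTypeType, s.MediaTypeSubType)
	}
}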
- trace := utiltrace.New("Patch", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("Patch", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { @@ -110,7 +110,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac } options := &metav1.PatchOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return @@ -337,6 +337,15 @@ func (p *jsonPatcher) createNewObject() (runtime.Object, error) { func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) { switch p.patchType { case types.JSONPatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := []interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + patchObj, err := jsonpatch.DecodePatch(p.patchBytes) if err != nil { return nil, errors.NewBadRequest(err.Error()) @@ -352,6 +361,15 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr } return patchedJS, nil case types.MergePatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := map[string]interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + return jsonpatch.MergePatch(versionedJS, p.patchBytes) default: // only here as a safety net - go-restful filters content-type diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go index c6d71841409..6a13a8d5ec9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation" @@ -38,6 +38,24 @@ import ( // the client's desired form, as well as ensuring any API level fields like self-link // are properly set. func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { + if co, ok := obj.(runtime.CacheableObject); ok { + if mediaType.Convert != nil { + // Non-nil mediaType.Convert means that some conversion of the object + // has to happen. 
Currently conversion may potentially modify the + // object or assume something about it (e.g. asTable operates on + // reflection, which won't work for any wrapper). + // To ensure it will work correctly, let's operate on base objects + // and not cache it for now. + // + // TODO: Long-term, transformObject should be changed so that it + // implements runtime.Encoder interface. + return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req) + } + } + return doTransformObject(ctx, obj, opts, mediaType, scope, req) +} + +func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { if _, ok := obj.(*metav1.Status); ok { return obj, nil } @@ -63,7 +81,7 @@ func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, return asTable(ctx, obj, options, scope, target.GroupVersion()) default: - accepted, _ := negotiation.MediaTypesForSerializer(metainternalversion.Codecs) + accepted, _ := negotiation.MediaTypesForSerializer(metainternalversionscheme.Codecs) err := negotiation.NewNotAcceptableError(accepted) return nil, err } @@ -76,7 +94,7 @@ func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Reque case target == nil: case target.Kind == "Table" && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): opts := &metav1beta1.TableOptions{} - if err := metav1beta1.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { return nil, err } switch errs := validation.ValidateTableOptions(opts); len(errs) { @@ -97,7 +115,7 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media case target == nil: case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): - return *target, metainternalversion.Codecs, true + return *target, metainternalversionscheme.Codecs, true } return scope.Kind, scope.Serializer, false } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 75764597f49..11d13cd3120 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -220,12 +220,15 @@ func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, defer func() { panicReason := recover() if panicReason != nil { - // Same as stdlib http server code. Manually allocate stack - // trace buffer size to prevent excessively large logs - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:goruntime.Stack(buf, false)] - panicReason = fmt.Sprintf("%v\n%s", panicReason, buf) + // do not wrap the sentinel ErrAbortHandler panic value + if panicReason != http.ErrAbortHandler { + // Same as stdlib http server code. 
Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:goruntime.Stack(buf, false)] + panicReason = fmt.Sprintf("%v\n%s", panicReason, buf) + } // Propagate to parent goroutine panicCh <- panicReason } @@ -406,7 +409,8 @@ func parseTimeout(str string) time.Duration { } klog.Errorf("Failed to parse %q: %v", str, err) } - return 30 * time.Second + // 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout. Everyone chooses 30. + return 34 * time.Second } func isDryRun(url *url.URL) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index b7c2dbcd4f5..686df45b78d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -24,7 +24,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" @@ -45,7 +45,7 @@ import ( func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // For performance tracking purposes. - trace := utiltrace.New("Update", utiltrace.Field{"url", req.URL.Path}) + trace := utiltrace.New("Update", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { @@ -78,7 +78,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa } options := &metav1.UpdateOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return @@ -210,7 +210,7 @@ func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer return errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize a create on update")) } once.Do(func() { - authorizerDecision, authorizerReason, authorizerErr = a.Authorize(attributes) + authorizerDecision, authorizerReason, authorizerErr = a.Authorize(ctx, attributes) }) // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. 
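The finishRequest change above leaves the http.ErrAbortHandler sentinel unwrapped when recovering, so callers can still recognize it, and only other panic values get the bounded 64 KiB stack capture. A sketch of that recover logic in isolation; the result channel plumbing of the real handler is omitted:

package main

import (
	"fmt"
	"net/http"
	goruntime "runtime"
)

// describePanic mirrors the deferred recover logic: the sentinel
// http.ErrAbortHandler is passed through as-is, anything else is wrapped
// with a stack trace captured into a fixed-size buffer.
func describePanic(panicReason interface{}) interface{} {
	if panicReason == http.ErrAbortHandler {
		return panicReason
	}
	const size = 64 << 10 // 64 KiB, the same bound the stdlib http server uses
	buf := make([]byte, size)
	buf = buf[:goruntime.Stack(buf, false)]
	return fmt.Sprintf("%v\n%s", panicReason, buf)
}

func run(fn func()) (reason interface{}) {
	defer func() {
		if r := recover(); r != nil {
			reason = describePanic(r)
		}
	}()
	fn()
	return nil
}

func main() {
	fmt.Println(run(func() { panic(http.ErrAbortHandler) }) == http.ErrAbortHandler) // true: not wrapped
	if r := run(func() { panic("boom") }); r != nil {
		fmt.Println("wrapped panic with stack attached")
	}
}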
if authorizerDecision == authorizer.DecisionAllow { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index be670fdc280..2433ea2facb 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -285,10 +285,12 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { buf := &bytes.Buffer{} streamBuf := &bytes.Buffer{} ch := s.Watching.ResultChan() + + defer s.Watching.Stop() + for { select { case <-done: - s.Watching.Stop() return case event, ok := <-ch: if !ok { @@ -317,25 +319,21 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { if err != nil { utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) // client disconnect. - s.Watching.Stop() return } if err := s.Encoder.Encode(outEvent, streamBuf); err != nil { // encoding error utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err)) - s.Watching.Stop() return } if s.UseTextFraming { if err := websocket.Message.Send(ws, streamBuf.String()); err != nil { // Client disconnect. - s.Watching.Stop() return } } else { if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil { // Client disconnect. - s.Watching.Stop() return } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 144fd44e2e1..9bf2467cc84 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -550,17 +550,17 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { - fm, err := fieldmanager.NewFieldManager( + reqScope.FieldManager, err = fieldmanager.NewDefaultFieldManager( a.group.OpenAPIModels, a.group.UnsafeConvertor, a.group.Defaulter, - fqKindToRegister.GroupVersion(), + a.group.Creater, + fqKindToRegister, reqScope.HubGroupVersion, ) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) } - reqScope.FieldManager = fm } for _, action := range actions { producedObject := storageMeta.ProducesObject(action.Verb) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD index 95e8093e922..8d13a34eadc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD @@ -28,7 +28,6 @@ go_library( "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 30ce24a4bd8..36ff861da7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -28,7 +28,6 @@ import ( "time" restful 
"github.com/emicklei/go-restful" - "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/types" @@ -77,9 +76,10 @@ var ( ) deprecatedRequestCounter = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ - Name: "apiserver_request_count", - Help: "(Deprecated) Counter of apiserver requests broken out for each verb, group, version, resource, scope, component, client, and HTTP response contentType and code.", - StabilityLevel: compbasemetrics.ALPHA, + Name: "apiserver_request_count", + Help: "Counter of apiserver requests broken out for each verb, group, version, resource, scope, component, client, and HTTP response contentType and code.", + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "client", "contentType", "code"}, ) @@ -107,21 +107,23 @@ var ( deprecatedRequestLatencies = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_request_latencies", - Help: "(Deprecated) Response latency distribution in microseconds for each verb, group, version, resource, subresource, scope and component.", + Help: "Response latency distribution in microseconds for each verb, group, version, resource, subresource, scope and component.", // Use buckets ranging from 125 ms to 8 seconds. - Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7), - StabilityLevel: compbasemetrics.ALPHA, + Buckets: compbasemetrics.ExponentialBuckets(125000, 2.0, 7), + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) deprecatedRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ Name: "apiserver_request_latencies_summary", - Help: "(Deprecated) Response latency summary in microseconds for each verb, group, version, resource, subresource, scope and component.", + Help: "Response latency summary in microseconds for each verb, group, version, resource, subresource, scope and component.", // Make the sliding window of 5h. // TODO: The value for this should be based on our SLI definition (medium term). - MaxAge: 5 * time.Hour, - StabilityLevel: compbasemetrics.ALPHA, + MaxAge: 5 * time.Hour, + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) @@ -130,7 +132,7 @@ var ( Name: "apiserver_response_sizes", Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.", // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). 
- Buckets: prometheus.ExponentialBuckets(1000, 10.0, 7), + Buckets: compbasemetrics.ExponentialBuckets(1000, 10.0, 7), StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, @@ -146,9 +148,10 @@ var ( ) DeprecatedDroppedRequests = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ - Name: "apiserver_dropped_requests", - Help: "(Deprecated) Number of requests dropped with 'Try again later' response", - StabilityLevel: compbasemetrics.ALPHA, + Name: "apiserver_dropped_requests", + Help: "Number of requests dropped with 'Try again later' response", + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"requestKind"}, ) @@ -173,12 +176,12 @@ var ( &compbasemetrics.HistogramOpts{ Name: "apiserver_watch_events_sizes", Help: "Watch event size distribution in bytes", - Buckets: prometheus.ExponentialBuckets(1024, 2.0, 8), // 1K, 2K, 4K, 8K, ..., 128K. + Buckets: compbasemetrics.ExponentialBuckets(1024, 2.0, 8), // 1K, 2K, 4K, 8K, ..., 128K. StabilityLevel: compbasemetrics.ALPHA, }, []string{"group", "version", "kind"}, ) - // Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time + // Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time // it reports maximal usage during the last second. currentInflightRequests = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ @@ -188,6 +191,15 @@ var ( }, []string{"requestKind"}, ) + + requestTerminationsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_request_terminations_total", + Help: "Number of requests which apiserver terminated in self-defense.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"}, + ) kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) metrics = []resettableCollector{ @@ -204,6 +216,7 @@ var ( WatchEvents, WatchEventsSizes, currentInflightRequests, + requestTerminationsTotal, } ) @@ -237,10 +250,11 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) { currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating)) } -// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own -// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if -// you already have a RequestInfo object. -func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) { +// RecordRequestTermination records that the request was terminated early as part of a resource +// preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling, +// proxyHandler errors). RecordRequestTermination should only be called zero or one times +// per request. +func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInfo, component string, code int) { if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } @@ -252,9 +266,9 @@ func Record(req *http.Request, requestInfo *request.RequestInfo, component, cont // However, we need to tweak it e.g. to differentiate GET from LIST. 
verb := canonicalVerb(strings.ToUpper(req.Method), scope) if requestInfo.IsResourceRequest { - MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed) + requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() } else { - MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed) + requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc() } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD index 388c1846e3a..9f01735a389 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -32,6 +32,7 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index cc8ae39fa2c..8eb1a8500c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/validation/path" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -208,7 +209,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er // if there's no name on the request and we thought it was a get before, then the actual verb is a list or a watch if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" { opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { // An error in parsing request will result in default to "list" and not setting "name" field. klog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) // Reset opts to not rely on partial results from parsing. 
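The metrics hunks above switch bucket construction from the prometheus package to component-base's ExponentialBuckets helper and add an apiserver_request_terminations_total counter that RecordRequestTermination increments once per self-defense termination. The sketch below uses the plain prometheus client so it stays self-contained; the real code goes through k8s.io/component-base/metrics and its legacy registry, and the label set here is trimmed:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Buckets start at 125000µs (125ms) and double seven times, i.e. 125ms .. 8s,
	// matching the comment on the deprecated request latency histogram.
	requestLatencies = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_latencies_microseconds",
		Help:    "Response latency distribution in microseconds.",
		Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7),
	})

	// One increment per terminated request, labelled in the spirit of the new
	// apiserver_request_terminations_total metric.
	requestTerminations = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "request_terminations_total",
		Help: "Number of requests terminated in self-defense.",
	}, []string{"verb", "code"})
)

func main() {
	prometheus.MustRegister(requestLatencies, requestTerminations)

	requestLatencies.Observe(250000) // a 250ms request
	requestTerminations.WithLabelValues("GET", "429").Inc()

	fmt.Println("buckets:", prometheus.ExponentialBuckets(125000, 2.0, 7))
}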
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go index bc23ed1a0e1..8f40751a38f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -123,6 +123,7 @@ const ( // owner: @wojtek-t // alpha: v1.15 // beta: v1.16 + // GA: v1.17 // // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" @@ -132,7 +133,7 @@ const ( // // // Enables managing request concurrency with prioritization and fairness at each server - RequestManagement featuregate.Feature = "RequestManagement" + APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness" // owner: @wojtek-t // alpha: v1.16 @@ -161,7 +162,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, WinOverlay: {Default: false, PreRelease: featuregate.Alpha}, WinDSR: {Default: false, PreRelease: featuregate.Alpha}, - WatchBookmark: {Default: true, PreRelease: featuregate.Beta}, - RequestManagement: {Default: false, PreRelease: featuregate.Alpha}, + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha}, RemoveSelfLink: {Default: false, PreRelease: featuregate.Alpha}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS index 60f6346a5a7..59bdd352d7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -23,12 +23,8 @@ reviewers: - dims - hongchaodeng - krousey -- markturansky -- fgrzadkowski - xiang90 - resouer - mqliang -- feihujiang - sdminonne -- goltermann - enj diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS index f6ed8df426e..9ee8b8a53a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -18,16 +18,9 @@ reviewers: - dims - hongchaodeng - krousey -- jszczepkowski - euank -- markturansky -- fgrzadkowski -- fabioy - ingvagabund - david-mcmahon - jianhuiz -- nhlfr -- feihujiang - sdminonne -- goltermann - enj diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/BUILD index b7d06e11fb8..9029444cb9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/BUILD @@ -40,6 +40,7 @@ go_test( "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", ], ) @@ -73,7 +74,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", @@ -98,6 +98,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/egressselector:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library", @@ -107,13 +108,12 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/openapi:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", @@ -121,6 +121,7 @@ go_library( "//vendor/k8s.io/kube-openapi/pkg/handler:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", ], ) @@ -135,6 +136,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:all-srcs", "//staging/src/k8s.io/apiserver/pkg/server/egressselector:all-srcs", "//staging/src/k8s.io/apiserver/pkg/server/filters:all-srcs", "//staging/src/k8s.io/apiserver/pkg/server/healthz:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go index 71d77a7b40f..ee8051a0ae3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go @@ -17,12 +17,11 @@ limitations under the License. 
package server import ( - "crypto/tls" - "crypto/x509" "fmt" "net" "net/http" goruntime "runtime" + "runtime/debug" "sort" "strconv" "strings" @@ -31,7 +30,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/go-openapi/spec" - "github.com/pborman/uuid" + "github.com/google/uuid" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -55,6 +54,7 @@ import ( apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi" apirequest "k8s.io/apiserver/pkg/endpoints/request" genericregistry "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/server/dynamiccertificates" "k8s.io/apiserver/pkg/server/egressselector" genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/server/healthz" @@ -62,7 +62,6 @@ import ( serverstore "k8s.io/apiserver/pkg/server/storage" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" - certutil "k8s.io/client-go/util/cert" "k8s.io/component-base/logs" "k8s.io/klog" openapicommon "k8s.io/kube-openapi/pkg/common" @@ -116,6 +115,8 @@ type Config struct { EnableMetrics bool DisabledPostStartHooks sets.String + // done values in this values for this map are ignored. + PostStartHooks map[string]PostStartHookConfigEntry // Version will enable the /version endpoint if non-nil Version *version.Info @@ -180,7 +181,7 @@ type Config struct { // patch may cause. // This affects all places that applies json patch in the binary. JSONPatchMaxCopyBytes int64 - // The limit on the request body size that would be accepted and decoded in a write request. + // The limit on the request size that would be accepted and decoded in a write request // 0 means no limit. MaxRequestBodyBytes int64 // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further @@ -231,13 +232,13 @@ type SecureServingInfo struct { // Cert is the main server cert which is used if SNI does not match. Cert must be non-nil and is // allowed to be in SNICerts. - Cert *tls.Certificate + Cert dynamiccertificates.CertKeyContentProvider - // SNICerts are the TLS certificates by name used for SNI. - SNICerts map[string]*tls.Certificate + // SNICerts are the TLS certificates used for SNI. + SNICerts []dynamiccertificates.SNICertKeyContentProvider // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates - ClientCA *x509.CertPool + ClientCA dynamiccertificates.CAContentProvider // MinTLSVersion optionally overrides the minimum TLS version supported. // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). @@ -282,6 +283,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), DisabledPostStartHooks: sets.NewString(), + PostStartHooks: map[string]PostStartHookConfigEntry{}, HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), @@ -295,22 +297,20 @@ func NewConfig(codecs serializer.CodecFactory) *Config { MinRequestTimeout: 1800, LivezGracePeriod: time.Duration(0), ShutdownDelayDuration: time.Duration(0), - // 10MB is the recommended maximum client request size in bytes + // 1.5MB is the default client request size in bytes // the etcd server should accept. See - // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90. 
+ // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. // A request body might be encoded in json, and is converted to - // proto when persisted in etcd. Assuming the upper bound of - // the size ratio is 10:1, we set 100MB as the largest size + // proto when persisted in etcd, so we allow 2x as the largest size // increase the "copy" operations in a json patch may cause. - JSONPatchMaxCopyBytes: int64(100 * 1024 * 1024), - // 10MB is the recommended maximum client request size in bytes + JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024), + // 1.5MB is the recommended client request size in byte // the etcd server should accept. See - // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90. + // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. // A request body might be encoded in json, and is converted to - // proto when persisted in etcd. Assuming the upper bound of - // the size ratio is 10:1, we set 100MB as the largest request + // proto when persisted in etcd, so we allow 2x as the largest request // body size to be accepted and decoded in a write request. - MaxRequestBodyBytes: int64(100 * 1024 * 1024), + MaxRequestBodyBytes: int64(3 * 1024 * 1024), // Default to treating watch as a long-running operation // Generic API servers have no inherent long-running subresources @@ -345,22 +345,19 @@ func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, de } } -func (c *AuthenticationInfo) ApplyClientCert(clientCAFile string, servingInfo *SecureServingInfo) error { - if servingInfo != nil { - if len(clientCAFile) > 0 { - clientCAs, err := certutil.CertsFromFile(clientCAFile) - if err != nil { - return fmt.Errorf("unable to load client CA file: %v", err) - } - if servingInfo.ClientCA == nil { - servingInfo.ClientCA = x509.NewCertPool() - } - for _, cert := range clientCAs { - servingInfo.ClientCA.AddCert(cert) - } - } +func (c *AuthenticationInfo) ApplyClientCert(clientCA dynamiccertificates.CAContentProvider, servingInfo *SecureServingInfo) error { + if servingInfo == nil { + return nil + } + if clientCA == nil { + return nil + } + if servingInfo.ClientCA == nil { + servingInfo.ClientCA = clientCA + return nil } + servingInfo.ClientCA = dynamiccertificates.NewUnionCAContentProvider(servingInfo.ClientCA, clientCA) return nil } @@ -380,6 +377,47 @@ type CompletedConfig struct { *completedConfig } +// AddHealthChecks adds a health check to our config to be exposed by the health endpoints +// of our configured apiserver. We should prefer this to adding healthChecks directly to +// the config unless we explicitly want to add a healthcheck only to a specific health endpoint. +func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) { + for _, check := range healthChecks { + c.HealthzChecks = append(c.HealthzChecks, check) + c.LivezChecks = append(c.LivezChecks, check) + c.ReadyzChecks = append(c.ReadyzChecks, check) + } +} + +// AddPostStartHook allows you to add a PostStartHook that will later be added to the server itself in a New call. +// Name conflicts will cause an error. 
+func (c *Config) AddPostStartHook(name string, hook PostStartHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return fmt.Errorf("hook func may not be nil: %q", name) + } + if c.DisabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) + return nil + } + + if postStartHook, exists := c.PostStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack) + } + c.PostStartHooks[name] = PostStartHookConfigEntry{hook: hook, originatingStack: string(debug.Stack())} + + return nil +} + +// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure. +func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { + if err := c.AddPostStartHook(name, hook); err != nil { + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) + } +} + // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { @@ -541,6 +579,7 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G } } + // first add poststarthooks from delegated targets for k, v := range delegationTarget.PostStartHooks() { s.postStartHooks[k] = v } @@ -549,6 +588,13 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G s.preShutdownHooks[k] = v } + // add poststarthooks that were preconfigured. Using the add method will give us an error if the same name has already been registered. 
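Config.AddPostStartHook above records the registering call stack with runtime/debug.Stack(), so a duplicate registration fails with a pointer to the first caller, and the stored entries are later replayed into the server in New. A small standalone sketch of that register-once pattern with a simplified hook type:

package main

import (
	"fmt"
	"runtime/debug"
)

type hookFunc func() error

type hookEntry struct {
	hook             hookFunc
	originatingStack string // where the hook was first registered, for debugging duplicates
}

type registry struct{ hooks map[string]hookEntry }

func (r *registry) add(name string, hook hookFunc) error {
	if name == "" {
		return fmt.Errorf("missing name")
	}
	if hook == nil {
		return fmt.Errorf("hook func may not be nil: %q", name)
	}
	if prev, exists := r.hooks[name]; exists {
		// programmer error; the saved stack says who registered it first
		return fmt.Errorf("unable to add %q because it was already registered by: %s", name, prev.originatingStack)
	}
	r.hooks[name] = hookEntry{hook: hook, originatingStack: string(debug.Stack())}
	return nil
}

func main() {
	r := &registry{hooks: map[string]hookEntry{}}
	fmt.Println(r.add("start-informers", func() error { return nil })) // <nil>
	fmt.Println(r.add("start-informers", func() error { return nil })) // duplicate error with stack
}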
+ for name, preconfiguredPostStartHook := range c.PostStartHooks { + if err := s.AddPostStartHook(name, preconfiguredPostStartHook.hook); err != nil { + return nil, err + } + } + genericApiServerHookName := "generic-apiserver-start-informers" if c.SharedInformerFactory != nil && !s.isPostStartHookRegistered(genericApiServerHookName) { err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error { @@ -602,7 +648,6 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) - handler = genericapifilters.WithCacheControl(handler) handler = genericfilters.WithPanicRecovery(handler) return handler } @@ -682,7 +727,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati } privilegedLoopbackToken := loopback.BearerToken - var uid = uuid.NewRandom().String() + var uid = uuid.New().String() tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go index 53f1795275c..f2c2de9b324 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go @@ -21,6 +21,7 @@ import ( "net" restclient "k8s.io/client-go/rest" + netutils "k8s.io/utils/net" ) // LoopbackClientServerNameOverride is passed to the apiserver from the loopback client in order to @@ -38,12 +39,9 @@ func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, } return &restclient.Config{ - // Increase QPS limits. The client is currently passed to all admission plugins, - // and those can be throttled in case of higher load on apiserver - see #22340 and #22422 - // for more details. Once #22422 is fixed, we may want to remove it. - QPS: 50, - Burst: 100, - Host: "https://" + net.JoinHostPort(host, port), + // Do not limit loopback client QPS. + QPS: -1, + Host: "https://" + net.JoinHostPort(host, port), // override the ServerName to select our loopback certificate via SNI. This name is also // used by the client to compare the returns server certificate against. TLSClientConfig: restclient.TLSClientConfig{ @@ -73,23 +71,27 @@ func LoopbackHostPort(bindAddress string) (string, string, error) { return "", "", fmt.Errorf("invalid server bind address: %q", bindAddress) } - isIPv6 := net.ParseIP(host).To4() == nil + isIPv6 := netutils.IsIPv6String(host) // Value is expected to be an IP or DNS name, not "0.0.0.0". if host == "0.0.0.0" || host == "::" { - host = "localhost" // Get ip of local interface, but fall back to "localhost". // Note that "localhost" is resolved with the external nameserver first with Go's stdlib. // So if localhost. resolves, we don't get a 127.0.0.1 as expected. 
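getLoopbackAddress, introduced just below, walks net.InterfaceAddrs() for a loopback address of the requested IP family and falls back to "localhost". A stdlib-only sketch of the same lookup; it uses To4() for the family check instead of the k8s.io/utils/net helpers:

package main

import (
	"fmt"
	"net"
)

// loopbackAddress returns a loopback IP of the wanted family, or "localhost"
// if none is found or the interfaces cannot be listed.
func loopbackAddress(wantIPv6 bool) string {
	addrs, err := net.InterfaceAddrs()
	if err == nil {
		for _, address := range addrs {
			ipnet, ok := address.(*net.IPNet)
			if !ok || !ipnet.IP.IsLoopback() {
				continue
			}
			if wantIPv6 == (ipnet.IP.To4() == nil) {
				return ipnet.IP.String()
			}
		}
	}
	return "localhost"
}

func main() {
	fmt.Println("ipv4 loopback:", loopbackAddress(false)) // typically 127.0.0.1
	fmt.Println("ipv6 loopback:", loopbackAddress(true))  // typically ::1
}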
- addrs, err := net.InterfaceAddrs() - if err == nil { - for _, address := range addrs { - if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && isIPv6 == (ipnet.IP.To4() == nil) { - host = ipnet.IP.String() - break - } + host = getLoopbackAddress(isIPv6) + } + return host, port, nil +} + +// getLoopbackAddress returns the ip address of local loopback interface. If any error occurs or loopback interface is not found, will fall back to "localhost" +func getLoopbackAddress(wantIPv6 bool) string { + addrs, err := net.InterfaceAddrs() + if err == nil { + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && wantIPv6 == netutils.IsIPv6(ipnet.IP) { + return ipnet.IP.String() } } } - return host, port, nil + return "localhost" } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/BUILD new file mode 100644 index 00000000000..735d024d516 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/BUILD @@ -0,0 +1,67 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "cert_key.go", + "client_ca.go", + "configmap_cafile_content.go", + "dynamic_cafile_content.go", + "dynamic_serving_content.go", + "dynamic_sni_content.go", + "named_certificates.go", + "static_content.go", + "tlsconfig.go", + "union_content.go", + "util.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates", + importpath = "k8s.io/apiserver/pkg/server/dynamiccertificates", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//staging/src/k8s.io/client-go/tools/events:go_default_library", + "//staging/src/k8s.io/client-go/util/cert:go_default_library", + "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "cert_key_test.go", + "client_ca_test.go", + "named_certificates_test.go", + "tlsconfig_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go new file mode 100644 index 00000000000..114002b1a07 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" +) + +// CertKeyContentProvider provides a certificate and matching private key +type CertKeyContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCertKeyContent provides cert and key byte content + CurrentCertKeyContent() ([]byte, []byte) +} + +// SNICertKeyContentProvider provides a certificate and matching private key as well as optional explicit names +type SNICertKeyContentProvider interface { + CertKeyContentProvider + // SNINames provides names used for SNI. May return nil. + SNINames() []string +} + +// certKeyContent holds the content for the cert and key +type certKeyContent struct { + cert []byte + key []byte +} + +func (c *certKeyContent) Equal(rhs *certKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.key, rhs.key) && bytes.Equal(c.cert, rhs.cert) +} + +// sniCertKeyContent holds the content for the cert and key as well as any explicit names +type sniCertKeyContent struct { + certKeyContent + sniNames []string +} + +func (c *sniCertKeyContent) Equal(rhs *sniCertKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if len(c.sniNames) != len(rhs.sniNames) { + return false + } + + for i := range c.sniNames { + if c.sniNames[i] != rhs.sniNames[i] { + return false + } + } + + return c.certKeyContent.Equal(&rhs.certKeyContent) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go new file mode 100644 index 00000000000..4348fa38784 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" +) + +// CAContentProvider provides ca bundle byte content +type CAContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCABundleContent provides ca bundle byte content. Errors can be contained to the controllers initializing + // the value. 
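cert_key.go above introduces small content-provider interfaces instead of passing tls.Certificate values around. Below is a sketch of a trivial in-memory implementation; the interface is re-declared locally so the snippet compiles on its own, the PEM bytes are placeholders, and the vendored package ships its own static provider (static_content.go) that this does not claim to reproduce:

package main

import "fmt"

// CertKeyContentProvider is a local copy of the interface added in cert_key.go.
type CertKeyContentProvider interface {
	Name() string
	CurrentCertKeyContent() ([]byte, []byte)
}

// staticCertKeyContent serves fixed PEM bytes; useful as the simplest possible
// provider or as a test double for code that consumes the interface.
type staticCertKeyContent struct {
	name      string
	cert, key []byte
}

func (s *staticCertKeyContent) Name() string { return s.name }

func (s *staticCertKeyContent) CurrentCertKeyContent() ([]byte, []byte) {
	return s.cert, s.key
}

func main() {
	var p CertKeyContentProvider = &staticCertKeyContent{
		name: "serving-cert::example",
		cert: []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"),
		key:  []byte("-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----\n"),
	}
	cert, key := p.CurrentCertKeyContent()
	fmt.Printf("%s: %d cert bytes, %d key bytes\n", p.Name(), len(cert), len(key))
}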
By the time you get here, you should always be returning a value that won't fail. + CurrentCABundleContent() []byte + // VerifyOptions provides VerifyOptions for authenticators + VerifyOptions() (x509.VerifyOptions, bool) +} + +// dynamicCertificateContent holds the content that overrides the baseTLSConfig +type dynamicCertificateContent struct { + // clientCA holds the content for the clientCA bundle + clientCA caBundleContent + servingCert certKeyContent + sniCerts []sniCertKeyContent +} + +// caBundleContent holds the content for the clientCA bundle. Wrapping the bytes makes the Equals work nicely with the +// method receiver. +type caBundleContent struct { + caBundle []byte +} + +func (c *dynamicCertificateContent) Equal(rhs *dynamicCertificateContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if !c.clientCA.Equal(&rhs.clientCA) { + return false + } + + if !c.servingCert.Equal(&rhs.servingCert) { + return false + } + + if len(c.sniCerts) != len(rhs.sniCerts) { + return false + } + + for i := range c.sniCerts { + if !c.sniCerts[i].Equal(&rhs.sniCerts[i]) { + return false + } + } + + return true +} + +func (c *caBundleContent) Equal(rhs *caBundleContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.caBundle, rhs.caBundle) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go new file mode 100644 index 00000000000..dad6fc30630 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go @@ -0,0 +1,277 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "fmt" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +// ConfigMapCAController provies a CAContentProvider that can dynamically react to configmap changes +// It also fulfills the authenticator interface to provide verifyoptions +type ConfigMapCAController struct { + name string + + configmapLister corev1listers.ConfigMapLister + configmapNamespace string + configmapName string + configmapKey string + // configMapInformer is tracked so that we can start these on Run + configMapInformer cache.SharedIndexInformer + + // caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file + caBundle atomic.Value + + listeners []Listener + + queue workqueue.RateLimitingInterface + // preRunCaches are the caches to sync before starting the work of this control loop + preRunCaches []cache.InformerSynced +} + +var _ Notifier = &ConfigMapCAController{} +var _ CAContentProvider = &ConfigMapCAController{} +var _ ControllerRunner = &ConfigMapCAController{} + +// NewDynamicCAFromConfigMapController returns a CAContentProvider based on a configmap that automatically reloads content. +// It is near-realtime via an informer. +func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, kubeClient kubernetes.Interface) (*ConfigMapCAController, error) { + if len(purpose) == 0 { + return nil, fmt.Errorf("missing purpose for ca bundle") + } + if len(namespace) == 0 { + return nil, fmt.Errorf("missing namespace for ca bundle") + } + if len(name) == 0 { + return nil, fmt.Errorf("missing name for ca bundle") + } + if len(key) == 0 { + return nil, fmt.Errorf("missing key for ca bundle") + } + caContentName := fmt.Sprintf("%s::%s::%s::%s", purpose, namespace, name, key) + + // we construct our own informer because we need such a small subset of the information available. Just one namespace. 
+ uncastConfigmapInformer := corev1informers.NewFilteredConfigMapInformer(kubeClient, namespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *v1.ListOptions) { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", name).String() + }) + + configmapLister := corev1listers.NewConfigMapLister(uncastConfigmapInformer.GetIndexer()) + + c := &ConfigMapCAController{ + name: caContentName, + configmapNamespace: namespace, + configmapName: name, + configmapKey: key, + configmapLister: configmapLister, + configMapInformer: uncastConfigmapInformer, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)), + preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, + } + if err := c.loadCABundle(); err != nil { + // don't fail, but do print out a message + klog.Warningf("unable to load initial CA bundle for: %q due to: %s", c.name, err) + } + uncastConfigmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + if cast, ok := obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + } + return true // always return true just in case. The checks are fairly cheap + }, + Handler: cache.ResourceEventHandlerFuncs{ + // we have a filter, so any time we're called, we may as well queue. We only ever check one configmap + // so we don't have to be choosy about our key. + AddFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + c.queue.Add(c.keyFn()) + }, + DeleteFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + }, + }) + + return c, nil +} + +func (c *ConfigMapCAController) keyFn() string { + // this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key + return c.configmapNamespace + "/" + c.configmapName +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c *ConfigMapCAController) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *ConfigMapCAController) loadCABundle() error { + configMap, err := c.configmapLister.ConfigMaps(c.configmapNamespace).Get(c.configmapName) + if err != nil { + return err + } + caBundle := configMap.Data[c.configmapKey] + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. + if !c.hasCAChanged([]byte(caBundle)) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), []byte(caBundle)) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *ConfigMapCAController) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. 
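ConfigMapCAController above keeps the last good CA bundle in an atomic.Value and only notifies listeners when the bytes actually change. The same cache-and-notify core in isolation; Listener is re-declared locally and the informer and workqueue machinery are left out:

package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
)

// Listener mirrors the dynamiccertificates interface: Enqueue is called when
// an input may have changed.
type Listener interface{ Enqueue() }

type printListener struct{ name string }

func (p *printListener) Enqueue() { fmt.Println(p.name, "asked to resync") }

type caCache struct {
	current   atomic.Value // always holds a []byte
	listeners []Listener
}

// set stores the new bundle and notifies listeners only on a real change.
func (c *caCache) set(caBundle []byte) {
	if prev, ok := c.current.Load().([]byte); ok && bytes.Equal(prev, caBundle) {
		return // same content, nothing to do
	}
	c.current.Store(append([]byte(nil), caBundle...)) // store a private copy
	for _, l := range c.listeners {
		l.Enqueue()
	}
}

func main() {
	c := &caCache{listeners: []Listener{&printListener{name: "tls-config-loop"}}}
	c.set([]byte("-----BEGIN CERTIFICATE-----A"))
	c.set([]byte("-----BEGIN CERTIFICATE-----A")) // no notification: unchanged
	c.set([]byte("-----BEGIN CERTIFICATE-----B")) // notifies again
}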
+ existing, ok := uncastExisting.(*caBundleAndVerifier) + if !ok { + return true + } + if !bytes.Equal(existing.caBundle, caBundle) { + return true + } + + return false +} + +// RunOnce runs a single sync loop +func (c *ConfigMapCAController) RunOnce() error { + // Ignore the error when running once because when using a dynamically loaded ca file, because we think it's better to have nothing for + // a brief time than completely crash. If crashing is necessary, higher order logic like a healthcheck and cause failures. + _ = c.loadCABundle() + return nil +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *ConfigMapCAController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // we have a personal informer that is narrowly scoped, start it. + go c.configMapInformer.Run(stopCh) + + // wait for your secondary caches to fill before starting your work + if !cache.WaitForNamedCacheSync(c.name, stopCh, c.preRunCaches...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + _ = wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + <-stopCh +} + +func (c *ConfigMapCAController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ConfigMapCAController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCABundle() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *ConfigMapCAController) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *ConfigMapCAController) CurrentCABundleContent() []byte { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + return nil // this can happen if we've been unable load data from the apiserver for some reason + } + + return c.caBundle.Load().(*caBundleAndVerifier).caBundle +} + +// VerifyOptions provides verifyoptions compatible with authenticators +func (c *ConfigMapCAController) VerifyOptions() (x509.VerifyOptions, bool) { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + // This can happen if we've been unable load data from the apiserver for some reason. + // In this case, we should not accept any connections on the basis of this ca bundle. + return x509.VerifyOptions{}, false + } + + return uncastObj.(*caBundleAndVerifier).verifyOptions, true +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go new file mode 100644 index 00000000000..09b5e616a43 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -0,0 +1,254 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+ "bytes"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "sync/atomic"
+ "time"
+
+ "k8s.io/client-go/util/cert"
+
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/workqueue"
+ "k8s.io/klog"
+)
+
+// FileRefreshDuration is exposed so that integration tests can crank up the reload speed.
+var FileRefreshDuration = 1 * time.Minute
+
+// Listener is an interface to use to notify interested parties of a change.
+type Listener interface {
+ // Enqueue should be called when an input may have changed
+ Enqueue()
+}
+
+// Notifier is a way to add listeners
+type Notifier interface {
+ // AddListener adds a listener to be notified of potential input changes
+ AddListener(listener Listener)
+}
+
+// ControllerRunner is a generic interface for starting a controller
+type ControllerRunner interface {
+ // RunOnce runs the sync loop a single time. This is useful for synchronous priming.
+ RunOnce() error
+
+ // Run should be called in a goroutine: go c.Run(workers, stopCh)
+ Run(workers int, stopCh <-chan struct{})
+}
+
+// DynamicFileCAContent provides a CAContentProvider that can dynamically react to new file content.
+// It also fulfills the authenticator interface to provide VerifyOptions.
+type DynamicFileCAContent struct {
+ name string
+
+ // filename is the name of the file to read.
+ filename string
+
+ // caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file
+ caBundle atomic.Value
+
+ listeners []Listener
+
+ // queue only ever has one item, but it has nice error handling backoff/retry semantics
+ queue workqueue.RateLimitingInterface
+}
+
+var _ Notifier = &DynamicFileCAContent{}
+var _ CAContentProvider = &DynamicFileCAContent{}
+var _ ControllerRunner = &DynamicFileCAContent{}
+
+type caBundleAndVerifier struct {
+ caBundle []byte
+ verifyOptions x509.VerifyOptions
+}
+
+// NewDynamicCAContentFromFile returns a CAContentProvider based on a filename that automatically reloads content
+func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAContent, error) {
+ if len(filename) == 0 {
+ return nil, fmt.Errorf("missing filename for ca bundle")
+ }
+ name := fmt.Sprintf("%s::%s", purpose, filename)
+
+ ret := &DynamicFileCAContent{
+ name: name,
+ filename: filename,
+ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
+ }
+ if err := ret.loadCABundle(); err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// AddListener adds a listener to be notified when the CA content changes.
+func (c *DynamicFileCAContent) AddListener(listener Listener) {
+ c.listeners = append(c.listeners, listener)
+}
+
+// loadCABundle determines the next set of content for the file.
+func (c *DynamicFileCAContent) loadCABundle() error {
+ caBundle, err := ioutil.ReadFile(c.filename)
+ if err != nil {
+ return err
+ }
+ if len(caBundle) == 0 {
+ return fmt.Errorf("missing content for CA bundle %q", c.Name())
+ }
+
+ // check to see if we have a change. If the values are the same, do nothing.
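// Illustrative sketch (not part of the vendored upstream file): a minimal Listener
// implementation and the typical wiring for the file-backed CA provider. The file
// path, the exampleReloadListener type, and the stopCh parameter are assumptions
// chosen only for illustration.
type exampleReloadListener struct {
	reloadCh chan struct{}
}

// Enqueue satisfies Listener; it only signals that the CA content may have changed.
func (l *exampleReloadListener) Enqueue() {
	select {
	case l.reloadCh <- struct{}{}:
	default:
	}
}

func exampleFileCA(stopCh <-chan struct{}) (CAContentProvider, error) {
	// Construction fails if the file is missing or empty, so callers get a hard error
	// at startup rather than serving with no CA bundle.
	caProvider, err := NewDynamicCAContentFromFile("client-ca", "/etc/kubernetes/pki/ca.crt")
	if err != nil {
		return nil, err
	}
	caProvider.AddListener(&exampleReloadListener{reloadCh: make(chan struct{}, 1)})
	go caProvider.Run(1, stopCh)
	return caProvider, nil
}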
+ if !c.hasCAChanged(caBundle) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), caBundle) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *DynamicFileCAContent) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := uncastExisting.(*caBundleAndVerifier) + if !ok { + return true + } + if !bytes.Equal(existing.caBundle, caBundle) { + return true + } + + return false +} + +// RunOnce runs a single sync loop +func (c *DynamicFileCAContent) RunOnce() error { + return c.loadCABundle() +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *DynamicFileCAContent) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + _ = wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + // TODO this can be wired to an fsnotifier as well. + + <-stopCh +} + +func (c *DynamicFileCAContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicFileCAContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCABundle() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicFileCAContent) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *DynamicFileCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.Load().(*caBundleAndVerifier).caBundle +} + +// VerifyOptions provides verifyoptions compatible with authenticators +func (c *DynamicFileCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + uncastObj := c.caBundle.Load() + if uncastObj == nil { + return x509.VerifyOptions{}, false + } + + return uncastObj.(*caBundleAndVerifier).verifyOptions, true +} + +// newVerifyOptions creates a new verification func from a file. It reads the content and then fails. +// It will return a nil function if you pass an empty CA file. 
+func newCABundleAndVerifier(name string, caBundle []byte) (*caBundleAndVerifier, error) { + if len(caBundle) == 0 { + return nil, fmt.Errorf("missing content for CA bundle %q", name) + } + + // Wrap with an x509 verifier + var err error + verifyOptions := defaultVerifyOptions() + verifyOptions.Roots, err = cert.NewPoolFromBytes(caBundle) + if err != nil { + return nil, fmt.Errorf("error loading CA bundle for %q: %v", name, err) + } + + return &caBundleAndVerifier{ + caBundle: caBundle, + verifyOptions: verifyOptions, + }, nil +} + +// defaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, +// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth) +func defaultVerifyOptions() x509.VerifyOptions { + return x509.VerifyOptions{ + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go new file mode 100644 index 00000000000..39be47fb07d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "sync/atomic" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +// DynamicFileServingContent provides a CertKeyContentProvider that can dynamically react to new file content +type DynamicFileServingContent struct { + name string + + // certFile is the name of the certificate file to read. + certFile string + // keyFile is the name of the key file to read. 
+ keyFile string + + // servingCert is a certKeyContent that contains the last read, non-zero length content of the key and cert + servingCert atomic.Value + + listeners []Listener + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +var _ Notifier = &DynamicFileServingContent{} +var _ CertKeyContentProvider = &DynamicFileServingContent{} +var _ ControllerRunner = &DynamicFileServingContent{} + +// NewDynamicServingContentFromFiles returns a dynamic CertKeyContentProvider based on a cert and key filename +func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*DynamicFileServingContent, error) { + if len(certFile) == 0 || len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for serving cert") + } + name := fmt.Sprintf("%s::%s::%s", purpose, certFile, keyFile) + + ret := &DynamicFileServingContent{ + name: name, + certFile: certFile, + keyFile: keyFile, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + } + if err := ret.loadServingCert(); err != nil { + return nil, err + } + + return ret, nil +} + +// AddListener adds a listener to be notified when the serving cert content changes. +func (c *DynamicFileServingContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadServingCert determines the next set of content for the file. +func (c *DynamicFileServingContent) loadServingCert() error { + cert, err := ioutil.ReadFile(c.certFile) + if err != nil { + return err + } + key, err := ioutil.ReadFile(c.keyFile) + if err != nil { + return err + } + if len(cert) == 0 || len(key) == 0 { + return fmt.Errorf("missing content for serving cert %q", c.Name()) + } + + // Ensure that the key matches the cert and both are valid + _, err = tls.X509KeyPair(cert, key) + if err != nil { + return err + } + + newCertKey := &certKeyContent{ + cert: cert, + key: key, + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := c.servingCert.Load().(*certKeyContent) + if ok && existing != nil && existing.Equal(newCertKey) { + return nil + } + + c.servingCert.Store(newCertKey) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// RunOnce runs a single sync loop +func (c *DynamicFileServingContent) RunOnce() error { + return c.loadServingCert() +} + +// Run starts the controller and blocks until stopCh is closed. +func (c *DynamicFileServingContent) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + _ = wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + // TODO this can be wired to an fsnotifier as well. 
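// Illustrative sketch (not part of the vendored upstream file): constructing the
// file-backed serving-cert provider and reading its current contents. The file
// paths and the stopCh parameter are assumptions chosen only for illustration.
func exampleServingCert(stopCh <-chan struct{}) (CertKeyContentProvider, error) {
	// Construction fails if either file is unreadable or empty, or if the key does
	// not match the certificate.
	servingCert, err := NewDynamicServingContentFromFiles("serving-cert", "/var/run/kubernetes/tls.crt", "/var/run/kubernetes/tls.key")
	if err != nil {
		return nil, err
	}
	go servingCert.Run(1, stopCh)

	// CurrentCertKeyContent returns the last successfully loaded PEM pair.
	certPEM, keyPEM := servingCert.CurrentCertKeyContent()
	_, _ = certPEM, keyPEM
	return servingCert, nil
}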
+ + <-stopCh +} + +func (c *DynamicFileServingContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicFileServingContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadServingCert() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicFileServingContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides serving cert byte content +func (c *DynamicFileServingContent) CurrentCertKeyContent() ([]byte, []byte) { + certKeyContent := c.servingCert.Load().(*certKeyContent) + return certKeyContent.cert, certKeyContent.key +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go new file mode 100644 index 00000000000..fad76fab626 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +// DynamicFileSNIContent provides a SNICertKeyContentProvider that can dynamically react to new file content +type DynamicFileSNIContent struct { + *DynamicFileServingContent + sniNames []string +} + +var _ Notifier = &DynamicFileSNIContent{} +var _ SNICertKeyContentProvider = &DynamicFileSNIContent{} +var _ ControllerRunner = &DynamicFileSNIContent{} + +// NewDynamicSNIContentFromFiles returns a dynamic SNICertKeyContentProvider based on a cert and key filename and explicit names +func NewDynamicSNIContentFromFiles(purpose, certFile, keyFile string, sniNames ...string) (*DynamicFileSNIContent, error) { + servingContent, err := NewDynamicServingContentFromFiles(purpose, certFile, keyFile) + if err != nil { + return nil, err + } + + ret := &DynamicFileSNIContent{ + DynamicFileServingContent: servingContent, + sniNames: sniNames, + } + if err := ret.loadServingCert(); err != nil { + return nil, err + } + + return ret, nil +} + +// SNINames returns explicitly set SNI names for the certificate. These are not dynamic. +func (c *DynamicFileSNIContent) SNINames() []string { + return c.sniNames +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go new file mode 100644 index 00000000000..5590dea5991 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" +) + +// BuildNamedCertificates returns a map of *tls.Certificate by name. It's +// suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs +// is invalid. Returns nil if len(certs) == 0 +func (c *DynamicServingCertificateController) BuildNamedCertificates(sniCerts []sniCertKeyContent) (map[string]*tls.Certificate, error) { + nameToCertificate := map[string]*tls.Certificate{} + byNameExplicit := map[string]*tls.Certificate{} + + // Iterate backwards so that earlier certs take precedence in the names map + for i := len(sniCerts) - 1; i >= 0; i-- { + cert, err := tls.X509KeyPair(sniCerts[i].cert, sniCerts[i].key) + if err != nil { + return nil, fmt.Errorf("invalid SNI cert keypair [%d/%q]: %v", i, c.sniCerts[i].Name(), err) + } + + // error is not possible given above call to X509KeyPair + x509Cert, _ := x509.ParseCertificate(cert.Certificate[0]) + + names := sniCerts[i].sniNames + for _, name := range names { + byNameExplicit[name] = &cert + } + + klog.V(2).Infof("loaded SNI cert [%d/%q]: %s", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(nil, nil, v1.EventTypeWarning, "TLSConfigChanged", "SNICertificateReload", "loaded SNI cert [%d/%q]: %s with explicit names %v", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert), names) + } + + if len(names) == 0 { + names = getCertificateNames(x509Cert) + for _, name := range names { + nameToCertificate[name] = &cert + } + } + } + + // Explicitly set names must override + for k, v := range byNameExplicit { + nameToCertificate[k] = v + } + + return nameToCertificate, nil +} + +// getCertificateNames returns names for an x509.Certificate. The names are +// suitable for use in tls.Config#NamedCertificates. +func getCertificateNames(cert *x509.Certificate) []string { + var names []string + + cn := cert.Subject.CommonName + if cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 { + names = append(names, cn) + } + for _, san := range cert.DNSNames { + names = append(names, san) + } + // intentionally all IPs in the cert are ignored as SNI forbids passing IPs + // to select a cert. Before go 1.6 the tls happily passed IPs as SNI values. + + return names +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go new file mode 100644 index 00000000000..239610df647 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" +) + +type staticCAContent struct { + name string + caBundle *caBundleAndVerifier +} + +var _ CAContentProvider = &staticCAContent{} + +// NewStaticCAContentFromFile returns a CAContentProvider based on a filename +func NewStaticCAContentFromFile(filename string) (CAContentProvider, error) { + if len(filename) == 0 { + return nil, fmt.Errorf("missing filename for ca bundle") + } + + caBundle, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return NewStaticCAContent(filename, caBundle) +} + +// NewStaticCAContent returns a CAContentProvider that always returns the same value +func NewStaticCAContent(name string, caBundle []byte) (CAContentProvider, error) { + caBundleAndVerifier, err := newCABundleAndVerifier(name, caBundle) + if err != nil { + return nil, err + } + + return &staticCAContent{ + name: name, + caBundle: caBundleAndVerifier, + }, nil +} + +// Name is just an identifier +func (c *staticCAContent) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *staticCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.caBundle +} + +func (c *staticCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + return c.caBundle.verifyOptions, true +} + +type staticCertKeyContent struct { + name string + cert []byte + key []byte +} + +type staticSNICertKeyContent struct { + staticCertKeyContent + sniNames []string +} + +// NewStaticCertKeyContentFromFiles returns a CertKeyContentProvider based on a filename +func NewStaticCertKeyContentFromFiles(certFile, keyFile string) (CertKeyContentProvider, error) { + if len(certFile) == 0 { + return nil, fmt.Errorf("missing filename for certificate") + } + if len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for key") + } + + certPEMBlock, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + keyPEMBlock, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, err + } + + return NewStaticCertKeyContent(fmt.Sprintf("cert: %s, key: %s", certFile, keyFile), certPEMBlock, keyPEMBlock) +} + +// NewStaticSNICertKeyContentFromFiles returns a SNICertKeyContentProvider based on a filename +func NewStaticSNICertKeyContentFromFiles(certFile, keyFile string, sniNames ...string) (SNICertKeyContentProvider, error) { + if len(certFile) == 0 { + return nil, fmt.Errorf("missing filename for certificate") + } + if len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for key") + } + + certPEMBlock, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + keyPEMBlock, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, err + } + + return NewStaticSNICertKeyContent(fmt.Sprintf("cert: %s, key: %s", certFile, keyFile), certPEMBlock, keyPEMBlock, sniNames...) 
+} + +// NewStaticCertKeyContent returns a CertKeyContentProvider that always returns the same value +func NewStaticCertKeyContent(name string, cert, key []byte) (CertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, nil +} + +// NewStaticSNICertKeyContent returns a SNICertKeyContentProvider that always returns the same value +func NewStaticSNICertKeyContent(name string, cert, key []byte, sniNames ...string) (SNICertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticSNICertKeyContent{ + staticCertKeyContent: staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, + sniNames: sniNames, + }, nil +} + +// Name is just an identifier +func (c *staticCertKeyContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key content +func (c *staticCertKeyContent) CurrentCertKeyContent() ([]byte, []byte) { + return c.cert, c.key +} + +func (c *staticSNICertKeyContent) SNINames() []string { + return c.sniNames +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go new file mode 100644 index 00000000000..78e094a5d81 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -0,0 +1,263 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "sync/atomic" + "time" + + v1 "k8s.io/api/core/v1" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/events" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +const workItemKey = "key" + +// DynamicServingCertificateController dynamically loads certificates and provides a golang tls compatible dynamic GetCertificate func. +type DynamicServingCertificateController struct { + // baseTLSConfig is the static portion of the tlsConfig for serving to clients. It is copied and the copy is mutated + // based on the dynamic cert state. + baseTLSConfig tls.Config + + // clientCA provides the very latest content of the ca bundle + clientCA CAContentProvider + // servingCert provides the very latest content of the default serving certificate + servingCert CertKeyContentProvider + // sniCerts are a list of CertKeyContentProvider with associated names used for SNI + sniCerts []SNICertKeyContentProvider + + // currentlyServedContent holds the original bytes that we are serving. This is used to decide if we need to set a + // new atomic value. 
The types used for efficient TLSConfig preclude using the processed value. + currentlyServedContent *dynamicCertificateContent + // currentServingTLSConfig holds a *tls.Config that will be used to serve requests + currentServingTLSConfig atomic.Value + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface + eventRecorder events.EventRecorder +} + +var _ Listener = &DynamicServingCertificateController{} + +// NewDynamicServingCertificateController returns a controller that can be used to keep a TLSConfig up to date. +func NewDynamicServingCertificateController( + baseTLSConfig tls.Config, + clientCA CAContentProvider, + servingCert CertKeyContentProvider, + sniCerts []SNICertKeyContentProvider, + eventRecorder events.EventRecorder, +) *DynamicServingCertificateController { + c := &DynamicServingCertificateController{ + baseTLSConfig: baseTLSConfig, + clientCA: clientCA, + servingCert: servingCert, + sniCerts: sniCerts, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"), + eventRecorder: eventRecorder, + } + + return c +} + +// GetConfigForClient is an implementation of tls.Config.GetConfigForClient +func (c *DynamicServingCertificateController) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { + uncastObj := c.currentServingTLSConfig.Load() + if uncastObj == nil { + return nil, errors.New("dynamiccertificates: configuration not ready") + } + tlsConfig, ok := uncastObj.(*tls.Config) + if !ok { + return nil, errors.New("dynamiccertificates: unexpected config type") + } + + return tlsConfig.Clone(), nil +} + +// newTLSContent determines the next set of content for overriding the baseTLSConfig. +func (c *DynamicServingCertificateController) newTLSContent() (*dynamicCertificateContent, error) { + newContent := &dynamicCertificateContent{} + + if c.clientCA != nil { + currClientCABundle := c.clientCA.CurrentCABundleContent() + // we allow removing all client ca bundles because the server is still secure when this happens. it just means + // that there isn't a hint to clients about which client-cert to used. this happens when there is no client-ca + // yet known for authentication, which can happen in aggregated apiservers and some kube-apiserver deployment modes. + newContent.clientCA = caBundleContent{caBundle: currClientCABundle} + } + + if c.servingCert != nil { + currServingCert, currServingKey := c.servingCert.CurrentCertKeyContent() + if len(currServingCert) == 0 || len(currServingKey) == 0 { + return nil, fmt.Errorf("not loading an empty serving certificate from %q", c.servingCert.Name()) + } + + newContent.servingCert = certKeyContent{cert: currServingCert, key: currServingKey} + } + + for i, sniCert := range c.sniCerts { + currCert, currKey := sniCert.CurrentCertKeyContent() + if len(currCert) == 0 || len(currKey) == 0 { + return nil, fmt.Errorf("not loading an empty SNI certificate from %d/%q", i, sniCert.Name()) + } + + newContent.sniCerts = append(newContent.sniCerts, sniCertKeyContent{certKeyContent: certKeyContent{cert: currCert, key: currKey}, sniNames: sniCert.SNINames()}) + } + + return newContent, nil +} + +// syncCerts gets newTLSContent, if it has changed from the existing, the content is parsed and stored for usage in +// GetConfigForClient. 
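// Illustrative sketch (not part of the vendored upstream file): the wiring pattern
// this controller is built for, mirroring what the secure-serving changes later in
// this patch do. The exampleDynamicTLSConfig function and its parameters are
// assumptions chosen only for illustration; the providers are the content providers
// defined in this package.
func exampleDynamicTLSConfig(clientCA CAContentProvider, servingCert CertKeyContentProvider, sniCerts []SNICertKeyContentProvider, stopCh <-chan struct{}) (*tls.Config, error) {
	baseTLSConfig := tls.Config{MinVersion: tls.VersionTLS12} // static portion, copied on every content change

	dynamicController := NewDynamicServingCertificateController(
		baseTLSConfig, clientCA, servingCert, sniCerts,
		nil, // event recorder is optional; with nil, changes are only logged via klog
	)

	// Providers that implement Notifier trigger the controller whenever their content changes.
	if notifier, ok := clientCA.(Notifier); ok {
		notifier.AddListener(dynamicController)
	}
	if notifier, ok := servingCert.(Notifier); ok {
		notifier.AddListener(dynamicController)
	}
	for _, sniCert := range sniCerts {
		if notifier, ok := sniCert.(Notifier); ok {
			notifier.AddListener(dynamicController)
		}
	}

	// Build an initial config synchronously, then keep it refreshed in the background.
	if err := dynamicController.RunOnce(); err != nil {
		return nil, err
	}
	go dynamicController.Run(1, stopCh)

	// Each TLS handshake asks GetConfigForClient for the most recently built configuration.
	baseTLSConfig.GetConfigForClient = dynamicController.GetConfigForClient
	return &baseTLSConfig, nil
}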
+func (c *DynamicServingCertificateController) syncCerts() error { + newContent, err := c.newTLSContent() + if err != nil { + return err + } + // if the content is the same as what we currently have, we can simply skip it. This works because we are single + // threaded. If you ever make this multi-threaded, add a lock. + if newContent.Equal(c.currentlyServedContent) { + return nil + } + + // make a shallow copy and override the dynamic pieces which have changed. + newTLSConfigCopy := c.baseTLSConfig.Clone() + + // parse new content to add to TLSConfig + if len(newContent.clientCA.caBundle) > 0 { + newClientCAPool := x509.NewCertPool() + newClientCAs, err := cert.ParseCertsPEM(newContent.clientCA.caBundle) + if err != nil { + return fmt.Errorf("unable to load client CA file %q: %v", string(newContent.clientCA.caBundle), err) + } + for i, cert := range newClientCAs { + klog.V(2).Infof("loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(nil, nil, v1.EventTypeWarning, "TLSConfigChanged", "CACertificateReload", "loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + } + + newClientCAPool.AddCert(cert) + } + + newTLSConfigCopy.ClientCAs = newClientCAPool + } + + if len(newContent.servingCert.cert) > 0 && len(newContent.servingCert.key) > 0 { + cert, err := tls.X509KeyPair(newContent.servingCert.cert, newContent.servingCert.key) + if err != nil { + return fmt.Errorf("invalid serving cert keypair: %v", err) + } + + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("invalid serving cert: %v", err) + } + + klog.V(2).Infof("loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(nil, nil, v1.EventTypeWarning, "TLSConfigChanged", "ServingCertificateReload", "loaded serving cert [%q]: %s", c.clientCA.Name(), GetHumanCertDetail(x509Cert)) + } + + newTLSConfigCopy.Certificates = []tls.Certificate{cert} + } + + if len(newContent.sniCerts) > 0 { + newTLSConfigCopy.NameToCertificate, err = c.BuildNamedCertificates(newContent.sniCerts) + if err != nil { + return fmt.Errorf("unable to build named certificate map: %v", err) + } + + // append all named certs. Otherwise, the go tls stack will think no SNI processing + // is necessary because there is only one cert anyway. + // Moreover, if servingCert is not set, the first SNI + // cert will become the default cert. That's what we expect anyway. + for _, sniCert := range newTLSConfigCopy.NameToCertificate { + newTLSConfigCopy.Certificates = append(newTLSConfigCopy.Certificates, *sniCert) + } + } + + // store new values of content for serving. + c.currentServingTLSConfig.Store(newTLSConfigCopy) + c.currentlyServedContent = newContent // this is single threaded, so we have no locking issue + + return nil +} + +// RunOnce runs a single sync step to ensure that we have a valid starting configuration. +func (c *DynamicServingCertificateController) RunOnce() error { + return c.syncCerts() +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *DynamicServingCertificateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting DynamicServingCertificateController") + defer klog.Infof("Shutting down DynamicServingCertificateController") + + // synchronously load once. 
We will trigger again, so ignoring any error is fine + _ = c.RunOnce() + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.Until(func() { + c.Enqueue() + }, 1*time.Minute, stopCh) + + <-stopCh +} + +func (c *DynamicServingCertificateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicServingCertificateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncCerts() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Enqueue a method to allow separate control loops to cause the certificate controller to trigger and read content. +func (c *DynamicServingCertificateController) Enqueue() { + c.queue.Add(workItemKey) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go new file mode 100644 index 00000000000..89e19ea5a3a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type unionCAContent []CAContentProvider + +var _ Notifier = &unionCAContent{} +var _ CAContentProvider = &unionCAContent{} +var _ ControllerRunner = &unionCAContent{} + +// NewUnionCAContentProvider returns a CAContentProvider that is a union of other CAContentProviders +func NewUnionCAContentProvider(caContentProviders ...CAContentProvider) CAContentProvider { + return unionCAContent(caContentProviders) +} + +// Name is just an identifier +func (c unionCAContent) Name() string { + names := []string{} + for _, curr := range c { + names = append(names, curr.Name()) + } + return strings.Join(names, ",") +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) CurrentCABundleContent() []byte { + caBundles := [][]byte{} + for _, curr := range c { + if currCABytes := curr.CurrentCABundleContent(); len(currCABytes) > 0 { + caBundles = append(caBundles, []byte(strings.TrimSpace(string(currCABytes)))) + } + } + + return bytes.Join(caBundles, []byte("\n")) +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + currCABundle := c.CurrentCABundleContent() + if len(currCABundle) == 0 { + return x509.VerifyOptions{}, false + } + + // TODO make more efficient. This isn't actually used in any of our mainline paths. 
It's called to build the TLSConfig + // TODO on file changes, but the actual authentication runs against the individual items, not the union. + ret, err := newCABundleAndVerifier(c.Name(), c.CurrentCABundleContent()) + if err != nil { + // because we're made up of already vetted values, this indicates some kind of coding error + panic(err) + } + + return ret.verifyOptions, true +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) AddListener(listener Listener) { + for _, curr := range c { + if notifier, ok := curr.(Notifier); ok { + notifier.AddListener(listener) + } + } +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) RunOnce() error { + errors := []error{} + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + if err := controller.RunOnce(); err != nil { + errors = append(errors, err) + } + } + } + + return utilerrors.NewAggregate(errors) +} + +// Run runs the controller +func (c unionCAContent) Run(workers int, stopCh <-chan struct{}) { + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + go controller.Run(workers, stopCh) + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go new file mode 100644 index 00000000000..6906045cd29 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/x509" + "fmt" + "strings" + "time" +) + +// GetHumanCertDetail is a convenient method for printing compact details of certificate that helps when debugging +// kube-apiserver usage of certs. 
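// Illustrative sketch (not part of the vendored upstream file): logging the summary
// produced by GetHumanCertDetail for a PEM-encoded bundle. cert.ParseCertsPEM is the
// same "k8s.io/client-go/util/cert" helper used by syncCerts above, and klog is
// assumed to be imported as in the other files in this package; the certPEM
// parameter is an assumption chosen only for illustration.
func exampleLogCertDetail(certPEM []byte) error {
	certs, err := cert.ParseCertsPEM(certPEM)
	if err != nil {
		return err
	}
	for i, c := range certs {
		klog.Infof("certificate [%d]: %s", i, GetHumanCertDetail(c))
	}
	return nil
}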
+func GetHumanCertDetail(certificate *x509.Certificate) string { + humanName := certificate.Subject.CommonName + signerHumanName := certificate.Issuer.CommonName + if certificate.Subject.CommonName == certificate.Issuer.CommonName { + signerHumanName = "" + } + + usages := []string{} + for _, curr := range certificate.ExtKeyUsage { + if curr == x509.ExtKeyUsageClientAuth { + usages = append(usages, "client") + continue + } + if curr == x509.ExtKeyUsageServerAuth { + usages = append(usages, "serving") + continue + } + + usages = append(usages, fmt.Sprintf("%d", curr)) + } + + validServingNames := []string{} + for _, ip := range certificate.IPAddresses { + validServingNames = append(validServingNames, ip.String()) + } + for _, dnsName := range certificate.DNSNames { + validServingNames = append(validServingNames, dnsName) + } + servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, servingString, signerHumanName, certificate.NotBefore.UTC(), certificate.NotAfter.UTC(), + time.Now().UTC()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index a14714edaf1..8cbbb3b014e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -60,15 +60,12 @@ func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSe if decodedConfig.Kind != "EgressSelectorConfiguration" { return nil, fmt.Errorf("invalid service configuration object %q", decodedConfig.Kind) } - config, err := cfgScheme.ConvertToVersion(&decodedConfig, apiserver.SchemeGroupVersion) - if err != nil { + internalConfig := &apiserver.EgressSelectorConfiguration{} + if err := cfgScheme.Convert(&decodedConfig, internalConfig, nil); err != nil { // we got an error where the decode wasn't related to a missing type return nil, err } - if internalConfig, ok := config.(*apiserver.EgressSelectorConfiguration); ok { - return internalConfig, nil - } - return nil, fmt.Errorf("unable to convert %T to *apiserver.EgressSelectorConfiguration", config) + return internalConfig, nil } // ValidateEgressSelectorConfiguration checks the apiserver.EgressSelectorConfiguration for diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index f7bc691c762..099e23e14fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -178,7 +178,7 @@ func WithMaxInFlightLimit( } } } - metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0) + metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests) tooManyRequests(r, w) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index e79da50bf20..9b8d6d4b138 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -28,6 +28,7 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" ) @@ -58,7 +59,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir postTimeoutFn := func() { cancel() - metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0) + metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout) } return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) } @@ -98,7 +99,8 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { go func() { defer func() { err := recover() - if err != nil { + // do not wrap the sentinel ErrAbortHandler panic value + if err != nil && err != http.ErrAbortHandler { // Same as stdlib http server code. Manually allocate stack // trace buffer size to prevent excessively large logs const size = 64 << 10 @@ -118,6 +120,23 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return case <-after: + defer func() { + // resultCh needs to have a reader, since the function doing + // the work needs to send to it. This is defer'd to ensure it runs + // ever if the post timeout work itself panics. + go func() { + res := <-resultCh + if res != nil { + switch t := res.(type) { + case error: + utilruntime.HandleError(t) + default: + utilruntime.HandleError(fmt.Errorf("%v", res)) + } + } + }() + }() + postTimeoutFn() tw.timeout(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go index d99308861c8..96bebdbd600 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -25,9 +25,16 @@ import ( "k8s.io/apiserver/pkg/server/httplog" ) -// WithPanicRecovery wraps an http Handler to recover and log panics. +// WithPanicRecovery wraps an http Handler to recover and log panics (except in the special case of http.ErrAbortHandler panics, which suppress logging). func WithPanicRecovery(handler http.Handler) http.Handler { return withPanicRecovery(handler, func(w http.ResponseWriter, req *http.Request, err interface{}) { + if err == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return + } http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) klog.Errorf("apiserver panic'd on %v %v", req.Method, req.RequestURI) }) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/hooks.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/hooks.go index fba876d2573..04e7f830234 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/hooks.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -66,6 +66,13 @@ type postStartHookEntry struct { done chan struct{} } +type PostStartHookConfigEntry struct { + hook PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. + originatingStack string +} + type preShutdownHookEntry struct { hook PreShutdownHookFunc } @@ -76,9 +83,10 @@ func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) return fmt.Errorf("missing name") } if hook == nil { - return nil + return fmt.Errorf("hook func may not be nil: %q", name) } if s.disabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go index a03b80d3ce7..be1077a8808 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go @@ -121,6 +121,7 @@ func StringFlagPutHandler(setter StringFlagSetterFunc) http.HandlerFunc { // writePlainText renders a simple string response. func writePlainText(statusCode int, text string, w http.ResponseWriter) { w.Header().Set("Content-Type", "text/plain") + w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(statusCode) fmt.Fprintln(w, text) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/secure_serving.go index 031f4b045ad..6d165abdf55 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -19,77 +19,132 @@ package server import ( "context" "crypto/tls" - "crypto/x509" "fmt" "net" "net/http" - "strings" "time" "golang.org/x/net/http2" "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apiserver/pkg/server/dynamiccertificates" ) const ( defaultKeepAlivePeriod = 3 * time.Minute ) -// Serve runs the secure http server. It fails only if certificates cannot be loaded or the initial listen call fails. -// The actual server loop (stoppable by closing stopCh) runs in a go routine, i.e. Serve does not block. -// It returns a stoppedCh that is closed when all non-hijacked active requests have been processed. 
-func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) (<-chan struct{}, error) { - if s.Listener == nil { - return nil, fmt.Errorf("listener must not be nil") - } - - secureServer := &http.Server{ - Addr: s.Listener.Addr().String(), - Handler: handler, - MaxHeaderBytes: 1 << 20, - TLSConfig: &tls.Config{ - NameToCertificate: s.SNICerts, - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - // enable HTTP2 for go's 1.7 HTTP Server - NextProtos: []string{"h2", "http/1.1"}, - }, +// tlsConfig produces the tls.Config to serve with. +func (s *SecureServingInfo) tlsConfig(stopCh <-chan struct{}) (*tls.Config, error) { + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + // enable HTTP2 for go's 1.7 HTTP Server + NextProtos: []string{"h2", "http/1.1"}, } + // these are static aspects of the tls.Config if s.DisableHTTP2 { klog.Info("Forcing use of http/1.1 only") - secureServer.TLSConfig.NextProtos = []string{"http/1.1"} + tlsConfig.NextProtos = []string{"http/1.1"} } - if s.MinTLSVersion > 0 { - secureServer.TLSConfig.MinVersion = s.MinTLSVersion + tlsConfig.MinVersion = s.MinTLSVersion } if len(s.CipherSuites) > 0 { - secureServer.TLSConfig.CipherSuites = s.CipherSuites + tlsConfig.CipherSuites = s.CipherSuites } - if s.Cert != nil { - secureServer.TLSConfig.Certificates = []tls.Certificate{*s.Cert} + if s.ClientCA != nil { + // Populate PeerCertificates in requests, but don't reject connections without certificates + // This allows certificates to be validated by authenticators, while still allowing other auth types + tlsConfig.ClientAuth = tls.RequestClientCert } - // append all named certs. Otherwise, the go tls stack will think no SNI processing - // is necessary because there is only one cert anyway. - // Moreover, if ServerCert.CertFile/ServerCert.KeyFile are not set, the first SNI - // cert will become the default cert. That's what we expect anyway. - for _, c := range s.SNICerts { - secureServer.TLSConfig.Certificates = append(secureServer.TLSConfig.Certificates, *c) + if s.ClientCA != nil || s.Cert != nil || len(s.SNICerts) > 0 { + dynamicCertificateController := dynamiccertificates.NewDynamicServingCertificateController( + *tlsConfig, + s.ClientCA, + s.Cert, + s.SNICerts, + nil, // TODO see how to plumb an event recorder down in here. For now this results in simply klog messages. + ) + // register if possible + if notifier, ok := s.ClientCA.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + if notifier, ok := s.Cert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + // start controllers if possible + if controller, ok := s.ClientCA.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of client CA failed: %v", err) + } + + go controller.Run(1, stopCh) + } + if controller, ok := s.Cert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. 
If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of default serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + for _, sniCert := range s.SNICerts { + if notifier, ok := sniCert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + + if controller, ok := sniCert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of SNI serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + } + + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := dynamicCertificateController.RunOnce(); err != nil { + klog.Warningf("Initial population of dynamic certificates failed: %v", err) + } + go dynamicCertificateController.Run(1, stopCh) + + tlsConfig.GetConfigForClient = dynamicCertificateController.GetConfigForClient } - if s.ClientCA != nil { - // Populate PeerCertificates in requests, but don't reject connections without certificates - // This allows certificates to be validated by authenticators, while still allowing other auth types - secureServer.TLSConfig.ClientAuth = tls.RequestClientCert - // Specify allowed CAs for client certificates - secureServer.TLSConfig.ClientCAs = s.ClientCA + return tlsConfig, nil +} + +// Serve runs the secure http server. It fails only if certificates cannot be loaded or the initial listen call fails. +// The actual server loop (stoppable by closing stopCh) runs in a go routine, i.e. Serve does not block. +// It returns a stoppedCh that is closed when all non-hijacked active requests have been processed. +func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) (<-chan struct{}, error) { + if s.Listener == nil { + return nil, fmt.Errorf("listener must not be nil") + } + + tlsConfig, err := s.tlsConfig(stopCh) + if err != nil { + return nil, err + } + + secureServer := &http.Server{ + Addr: s.Listener.Addr().String(), + Handler: handler, + MaxHeaderBytes: 1 << 20, + TLSConfig: tlsConfig, } // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. @@ -173,57 +228,6 @@ func RunServer( return stoppedCh, nil } -type NamedTLSCert struct { - TLSCert tls.Certificate - - // Names is a list of domain patterns: fully qualified domain names, possibly prefixed with - // wildcard segments. - Names []string -} - -// GetNamedCertificateMap returns a map of *tls.Certificate by name. It's -// suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs -// cannot be loaded. 
Returns nil if len(certs) == 0 -func GetNamedCertificateMap(certs []NamedTLSCert) (map[string]*tls.Certificate, error) { - // register certs with implicit names first, reverse order such that earlier trump over the later - byName := map[string]*tls.Certificate{} - for i := len(certs) - 1; i >= 0; i-- { - if len(certs[i].Names) > 0 { - continue - } - cert := &certs[i].TLSCert - - // read names from certificate common names and DNS names - if len(cert.Certificate) == 0 { - return nil, fmt.Errorf("empty SNI certificate, skipping") - } - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return nil, fmt.Errorf("parse error for SNI certificate: %v", err) - } - cn := x509Cert.Subject.CommonName - if cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 { - byName[cn] = cert - } - for _, san := range x509Cert.DNSNames { - byName[san] = cert - } - // intentionally all IPs in the cert are ignored as SNI forbids passing IPs - // to select a cert. Before go 1.6 the tls happily passed IPs as SNI values. - } - - // register certs with explicit names last, overwriting every of the implicit ones, - // again in reverse order. - for i := len(certs) - 1; i >= 0; i-- { - namedCert := &certs[i] - for _, name := range namedCert.Names { - byName[name] = &certs[i].TLSCert - } - } - - return byName, nil -} - // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted // connections. It's used by ListenAndServe and ListenAndServeTLS so // dead TCP connections (e.g. closing laptop mid-download) eventually diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go index eb892b5a5c0..0f2d45c9e78 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go @@ -52,6 +52,24 @@ func (o *ResourceConfig) EnableAll() { } } +// DisableMatchingVersions disables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) DisableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = false + } + } +} + +// EnableMatchingVersions enables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) EnableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = true + } + } +} + // DisableVersions disables the versions entirely. 
func (o *ResourceConfig) DisableVersions(versions ...schema.GroupVersion) { for _, version := range versions { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 267de1370b3..f3a54043a72 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -307,8 +307,8 @@ func (s *DefaultStorageFactory) Backends() []Backend { tlsConfig.Certificates = []tls.Certificate{cert} } } - if len(s.StorageConfig.Transport.CAFile) > 0 { - if caCert, err := ioutil.ReadFile(s.StorageConfig.Transport.CAFile); err != nil { + if len(s.StorageConfig.Transport.TrustedCAFile) > 0 { + if caCert, err := ioutil.ReadFile(s.StorageConfig.Transport.TrustedCAFile); err != nil { klog.Errorf("failed to read ca file while getting backends: %s", err) } else { caPool := x509.NewCertPool() diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/BUILD index 71d0bbc900e..f2506e70a86 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/BUILD @@ -33,8 +33,10 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage", importpath = "k8s.io/apiserver/pkg/storage", deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS index 26ea9eeb431..50d91abc89a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS @@ -19,14 +19,11 @@ reviewers: - timothysc - hongchaodeng - krousey -- fgrzadkowski - xiang90 - mml - ingvagabund - resouer - mbohlool -- lixiaobing10051267 - mqliang -- feihujiang - rrati - enj diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/errors.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/errors.go index f73d47aedc8..3acee459808 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -19,6 +19,8 @@ package storage import ( "fmt" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -168,3 +170,31 @@ func NewInternalError(reason string) InternalError { func NewInternalErrorf(format string, a ...interface{}) InternalError { return InternalError{fmt.Sprintf(format, a...)} } + +var tooLargeResourceVersionCauseMsg = "Too large resource version" + +// NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for +// a minimum resource version that is larger than the largest currently available resource version for a requested resource. 
+func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { + err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err.ErrStatus.Details.Causes = []metav1.StatusCause{{Message: tooLargeResourceVersionCauseMsg}} + return err +} + +// IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. +func IsTooLargeResourceVersion(err error) bool { + if !errors.IsTimeout(err) { + return false + } + switch t := err.(type) { + case errors.APIStatus: + if d := t.Status().Details; d != nil { + for _, cause := range d.Causes { + if cause.Message == tooLargeResourceVersionCauseMsg { + return true + } + } + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD index 22614fc570a..e95c24c4e98 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -39,13 +39,13 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/integration:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/go.etcd.io/etcd/clientv3:go_default_library", + "//vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", + "//vendor/go.etcd.io/etcd/integration:go_default_library", + "//vendor/go.etcd.io/etcd/mvcc/mvccpb:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) @@ -78,9 +78,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/go.etcd.io/etcd/clientv3:go_default_library", + "//vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", + "//vendor/go.etcd.io/etcd/mvcc/mvccpb:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/trace:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go index d4524f49221..bbae59153bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" + "go.etcd.io/etcd/clientv3" "k8s.io/klog" ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go index 136570a6fc3..b33751480a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go @@ -19,7 +19,7 @@ package etcd3 import ( "k8s.io/apimachinery/pkg/api/errors" - etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + etcdrpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go index dbaf785b261..c4e1f8032b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -18,8 +18,8 @@ package etcd3 import ( "fmt" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" ) type event struct { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go index e7e554c6270..6b5a5700a9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" + "go.etcd.io/etcd/clientv3" ) // leaseManager is used to manage leases requested from etcd. If a new write diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go index a117db6fe26..f2cf0e1b26c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go @@ -19,7 +19,7 @@ package etcd3 import ( "fmt" - "github.com/coreos/etcd/clientv3" + "go.etcd.io/etcd/clientv3" "k8s.io/klog" ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index 32210db1646..e2c4f1ca008 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -52,9 +52,10 @@ var ( deprecatedEtcdRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ - Name: "etcd_request_latencies_summary", - Help: "(Deprecated) Etcd request latency summary in microseconds for each operation and object type.", - StabilityLevel: compbasemetrics.ALPHA, + Name: "etcd_request_latencies_summary", + Help: "Etcd request latency summary in microseconds for each operation and object type.", + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"operation", "type"}, ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index f065872b52b..a90675784a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -28,8 +28,7 @@ import ( "strings" "time" - "github.com/coreos/etcd/clientv3" - "k8s.io/klog" + "go.etcd.io/etcd/clientv3" apierrors 
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -41,6 +40,7 @@ import ( "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" utiltrace "k8s.io/utils/trace" ) @@ -119,6 +119,9 @@ func (s *store) Get(ctx context.Context, key string, resourceVersion string, out if err != nil { return err } + if err = s.ensureMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } if len(getResp.Kvs) == 0 { if ignoreNotFound { @@ -398,6 +401,9 @@ func (s *store) GetToList(ctx context.Context, key string, resourceVersion strin if err != nil { return err } + if err = s.ensureMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } if len(getResp.Kvs) > 0 { data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) @@ -559,17 +565,6 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor options = append(options, clientv3.WithRange(rangeEnd)) default: - if len(resourceVersion) > 0 { - fromRV, err := s.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) - } - if fromRV > 0 { - options = append(options, clientv3.WithRev(int64(fromRV))) - } - returnedRV = int64(fromRV) - } - options = append(options, clientv3.WithPrefix()) } @@ -584,6 +579,9 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor if err != nil { return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) } + if err = s.ensureMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } hasMore = getResp.More if len(getResp.Kvs) == 0 && getResp.More { @@ -798,6 +796,24 @@ func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, er return []clientv3.OpOption{clientv3.WithLease(id)}, nil } +// ensureMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is +// greater than the most recent actualRevision available from storage. +func (s *store) ensureMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error { + if minimumResourceVersion == "" { + return nil + } + minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + // Enforce the storage.Interface guarantee that the resource version of the returned data + // "will be at least 'resourceVersion'". + if minimumRV > actualRevision { + return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0) + } + return nil +} + // decode decodes value of bytes into object. It will also set the object resource version to rev. // On success, objPtr would be set to the object. 
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index f2b16f3bd75..a66c9eb9e4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -31,7 +31,7 @@ import ( "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/value" - "github.com/coreos/etcd/clientv3" + "go.etcd.io/etcd/clientv3" "k8s.io/klog" ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 37c65948e9a..cbf50b2112a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -36,9 +36,9 @@ type TransportConfig struct { // ServerList is the list of storage servers to connect with. ServerList []string // TLS credentials - KeyFile string - CertFile string - CAFile string + KeyFile string + CertFile string + TrustedCAFile string // function to determine the egress dialer. (i.e. konnectivity server dialer) EgressLookup egressselector.Lookup } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD index aa6d1827955..594c1d396ee 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD @@ -20,8 +20,8 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/apis/example/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/testingcert:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", - "//vendor/github.com/coreos/etcd/integration:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/go.etcd.io/etcd/integration:go_default_library", + "//vendor/go.etcd.io/etcd/pkg/transport:go_default_library", ], ) @@ -42,9 +42,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", + "//vendor/go.etcd.io/etcd/clientv3:go_default_library", + "//vendor/go.etcd.io/etcd/pkg/transport:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 1d01626291a..81a24825b9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -26,9 +26,9 @@ import ( "sync/atomic" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/transport" grpcprom 
"github.com/grpc-ecosystem/go-grpc-prometheus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/transport" "google.golang.org/grpc" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -86,8 +86,8 @@ func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) { client := clientValue.Load().(*clientv3.Client) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - // See https://github.com/etcd-io/etcd/blob/master/etcdctl/ctlv3/command/ep_command.go#L118 - _, err := client.Get(ctx, path.Join(c.Prefix, "health")) + // See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118 + _, err := client.Get(ctx, path.Join("/", c.Prefix, "health")) if err == nil { return nil } @@ -97,9 +97,9 @@ func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) { func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) { tlsInfo := transport.TLSInfo{ - CertFile: c.CertFile, - KeyFile: c.KeyFile, - CAFile: c.CAFile, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + TrustedCAFile: c.TrustedCAFile, } tlsConfig, err := tlsInfo.ClientConfig() if err != nil { @@ -107,7 +107,7 @@ func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) } // NOTE: Client relies on nil tlsConfig // for non-secure connections, update the implicit variable - if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.CAFile) == 0 { + if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 { tlsConfig = nil } networkContext := egressselector.Etcd.AsNetworkContext() diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/BUILD index 8ddbb531961..ffe57c748f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/BUILD @@ -14,8 +14,8 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus/testutil:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", + "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", "//vendor/google.golang.org/grpc/status:go_default_library", ], @@ -30,9 +30,9 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage/value", importpath = "k8s.io/apiserver/pkg/storage/value", deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go index f35994688bc..3d7f30bfc71 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go @@ -20,7 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc/status" "k8s.io/component-base/metrics" @@ -49,7 +48,7 @@ var ( Help: 
"Latencies in seconds of value transformation operations.", // In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when // external KMS is involved latencies may climb into milliseconds. - Buckets: prometheus.ExponentialBuckets(5e-6, 2, 14), + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), StabilityLevel: metrics.ALPHA, }, []string{"transformation_type"}, @@ -59,11 +58,12 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "transformation_latencies_microseconds", - Help: "(Deprecated) Latencies in microseconds of value transformation operations.", + Help: "Latencies in microseconds of value transformation operations.", // In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when // external KMS is involved latencies may climb into milliseconds. - Buckets: prometheus.ExponentialBuckets(5, 2, 14), - StabilityLevel: metrics.ALPHA, + Buckets: metrics.ExponentialBuckets(5, 2, 14), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{"transformation_type"}, ) @@ -81,11 +81,12 @@ var ( deprecatedTransformerFailuresTotal = metrics.NewCounterVec( &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "transformation_failures_total", - Help: "(Deprecated) Total number of failed transformation operations.", - StabilityLevel: metrics.ALPHA, + Namespace: namespace, + Subsystem: subsystem, + Name: "transformation_failures_total", + Help: "Total number of failed transformation operations.", + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.15.0", }, []string{"transformation_type"}, ) @@ -106,18 +107,19 @@ var ( Subsystem: subsystem, Name: "data_key_generation_duration_seconds", Help: "Latencies in seconds of data encryption key(DEK) generation operations.", - Buckets: prometheus.ExponentialBuckets(5e-6, 2, 14), + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), StabilityLevel: metrics.ALPHA, }, ) deprecatedDataKeyGenerationLatencies = metrics.NewHistogram( &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "data_key_generation_latencies_microseconds", - Help: "(Deprecated) Latencies in microseconds of data encryption key(DEK) generation operations.", - Buckets: prometheus.ExponentialBuckets(5, 2, 14), - StabilityLevel: metrics.ALPHA, + Namespace: namespace, + Subsystem: subsystem, + Name: "data_key_generation_latencies_microseconds", + Help: "Latencies in microseconds of data encryption key(DEK) generation operations.", + Buckets: metrics.ExponentialBuckets(5, 2, 14), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) dataKeyGenerationFailuresTotal = metrics.NewCounter( diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go index bb0f1157c34..3bc41cd9a24 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go @@ -22,6 +22,8 @@ import ( "fmt" "sync" "time" + + "k8s.io/apimachinery/pkg/util/errors" ) func init() { @@ -129,6 +131,7 @@ func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transfo // the first transformer. 
func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) ([]byte, bool, error) { start := time.Now() + var errs []error for i, transformer := range t.transformers { if bytes.HasPrefix(data, transformer.Prefix) { result, stale, err := transformer.Transformer.TransformFromStorage(data[len(transformer.Prefix):], context) @@ -144,9 +147,48 @@ func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) } else { RecordTransformation("from_storage", string(transformer.Prefix), start, err) } + + // It is valid to have overlapping prefixes when the same encryption provider + // is specified multiple times but with different keys (the first provider is + // being rotated to and some later provider is being rotated away from). + // + // Example: + // + // { + // "aescbc": { + // "keys": [ + // { + // "name": "2", + // "secret": "some key 2" + // } + // ] + // } + // }, + // { + // "aescbc": { + // "keys": [ + // { + // "name": "1", + // "secret": "some key 1" + // } + // ] + // } + // }, + // + // The transformers for both aescbc configs share the prefix k8s:enc:aescbc:v1: + // but a failure in the first one should not prevent a later match from being attempted. + // Thus we never short-circuit on a prefix match that results in an error. + if err != nil { + errs = append(errs, err) + continue + } + return result, stale || i != 0, err } } + if err := errors.Reduce(errors.NewAggregate(errs)); err != nil { + return nil, false, err + } RecordTransformation("from_storage", "unknown", start, t.err) return nil, false, t.err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index eb6c17bdb6b..917d66c9222 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -18,6 +18,7 @@ limitations under the License. package webhook import ( + "context" "fmt" "time" @@ -35,9 +36,28 @@ import ( // timeout of the HTTP request, including reading the response body. const defaultRequestTimeout = 30 * time.Second +// GenericWebhook defines a generic client for webhooks with commonly used capabilities, +// such as retry requests. type GenericWebhook struct { RestClient *rest.RESTClient InitialBackoff time.Duration + ShouldRetry func(error) bool +} + +// DefaultShouldRetry is a default implementation for the GenericWebhook ShouldRetry function property. +// If the error reason is one of: networking (connection reset) or http (InternalServerError (500), GatewayTimeout (504), TooManyRequests (429)), +// or apierrors.SuggestsClientDelay() returns true, then the function advises a retry. +// Otherwise it returns false for an immediate fail. +func DefaultShouldRetry(err error) bool { + // these errors indicate a transient error that should be retried. + if net.IsConnectionReset(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { + return true + } + // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. + if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { + return true + } + return false } // NewGenericWebhook creates a new GenericWebhook from the provided kubeconfig file. 
@@ -76,23 +96,28 @@ func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFact return nil, err } - return &GenericWebhook{restClient, initialBackoff}, nil + return &GenericWebhook{restClient, initialBackoff, DefaultShouldRetry}, nil } // WithExponentialBackoff will retry webhookFn() up to 5 times with exponentially increasing backoff when -// it returns an error for which apierrors.SuggestsClientDelay() or apierrors.IsInternalError() returns true. -func (g *GenericWebhook) WithExponentialBackoff(webhookFn func() rest.Result) rest.Result { +// it returns an error for which this GenericWebhook's ShouldRetry function returns true, confirming it to +// be retriable. If no ShouldRetry has been defined for the webhook, then the default one is used (DefaultShouldRetry). +func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn func() rest.Result) rest.Result { var result rest.Result - WithExponentialBackoff(g.InitialBackoff, func() error { + shouldRetry := g.ShouldRetry + if shouldRetry == nil { + shouldRetry = DefaultShouldRetry + } + WithExponentialBackoff(ctx, g.InitialBackoff, func() error { result = webhookFn() return result.Error() - }) + }, shouldRetry) return result } -// WithExponentialBackoff will retry webhookFn() up to 5 times with exponentially increasing backoff when -// it returns an error for which apierrors.SuggestsClientDelay() or apierrors.IsInternalError() returns true. -func WithExponentialBackoff(initialBackoff time.Duration, webhookFn func() error) error { +// WithExponentialBackoff will retry webhookFn up to 5 times with exponentially increasing backoff when +// it returns an error for which shouldRetry returns true, confirming it to be retriable. +func WithExponentialBackoff(ctx context.Context, initialBackoff time.Duration, webhookFn func() error, shouldRetry func(error) bool) error { backoff := wait.Backoff{ Duration: initialBackoff, Factor: 1.5, @@ -103,12 +128,11 @@ func WithExponentialBackoff(initialBackoff time.Duration, webhookFn func() error var err error wait.ExponentialBackoff(backoff, func() (bool, error) { err = webhookFn() - // these errors indicate a transient error that should be retried. - if net.IsConnectionReset(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { - return false, nil + if ctx.Err() != nil { + // we timed out or were cancelled, we should not retry + return true, err } - // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. 
- if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { + if shouldRetry(err) { return false, nil } if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD index eb13b669bdf..e374e481bd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD @@ -10,16 +10,21 @@ go_test( name = "go_default_test", srcs = [ "certs_test.go", - "webhook_test.go", + "round_trip_test.go", + "webhook_v1_test.go", + "webhook_v1beta1_test.go", ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/token/cache:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", ], ) @@ -29,6 +34,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", importpath = "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", deps = [ + "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -36,7 +42,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index e13985d726b..53c5d2285bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -20,30 +20,32 @@ package webhook import ( "context" "errors" + "fmt" "time" - authentication "k8s.io/api/authentication/v1beta1" + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" - authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + authenticationv1client "k8s.io/client-go/kubernetes/typed/authentication/v1" "k8s.io/klog" ) -var ( - groupVersions = 
[]schema.GroupVersion{authentication.SchemeGroupVersion} -) - const retryBackoff = 500 * time.Millisecond // Ensure WebhookTokenAuthenticator implements the authenticator.Token interface. var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) +type tokenReviewer interface { + CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) +} + type WebhookTokenAuthenticator struct { - tokenReview authenticationclient.TokenReviewInterface + tokenReview tokenReviewer initialBackoff time.Duration implicitAuds authenticator.Audiences } @@ -52,7 +54,7 @@ type WebhookTokenAuthenticator struct { // client. It is recommend to wrap this authenticator with the token cache // authenticator implemented in // k8s.io/apiserver/pkg/authentication/token/cache. -func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { +func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } @@ -60,8 +62,8 @@ func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, imp // file. It is recommend to wrap this authenticator with the token cache // authenticator implemented in // k8s.io/apiserver/pkg/authentication/token/cache. -func New(kubeConfigFile string, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { - tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile) +func New(kubeConfigFile string, version string, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile, version) if err != nil { return nil, err } @@ -69,7 +71,7 @@ func New(kubeConfigFile string, implicitAuds authenticator.Audiences) (*WebhookT } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(tokenReview authenticationclient.TokenReviewInterface, initialBackoff time.Duration, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { +func newWithBackoff(tokenReview tokenReviewer, initialBackoff time.Duration, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { return &WebhookTokenAuthenticator{tokenReview, initialBackoff, implicitAuds}, nil } @@ -87,21 +89,21 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token // intersection in the response. // * otherwise return unauthenticated. wantAuds, checkAuds := authenticator.AudiencesFrom(ctx) - r := &authentication.TokenReview{ - Spec: authentication.TokenReviewSpec{ + r := &authenticationv1.TokenReview{ + Spec: authenticationv1.TokenReviewSpec{ Token: token, Audiences: wantAuds, }, } var ( - result *authentication.TokenReview + result *authenticationv1.TokenReview err error auds authenticator.Audiences ) - webhook.WithExponentialBackoff(w.initialBackoff, func() error { - result, err = w.tokenReview.Create(r) + webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { + result, err = w.tokenReview.CreateContext(ctx, r) return err - }) + }, webhook.DefaultShouldRetry) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. 
klog.Errorf("Failed to make webhook authenticator request: %v", err) @@ -150,28 +152,99 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token // tokenReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, // and returns a TokenReviewInterface that uses that client. Note that the client submits TokenReview // requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. -func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string) (authenticationclient.TokenReviewInterface, error) { +func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string) (tokenReviewer, error) { localScheme := runtime.NewScheme() if err := scheme.AddToScheme(localScheme); err != nil { return nil, err } - if err := localScheme.SetVersionPriority(groupVersions...); err != nil { - return nil, err + + switch version { + case authenticationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) + if err != nil { + return nil, err + } + return &tokenReviewV1Client{gw}, nil + + case authenticationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) + if err != nil { + return nil, err + } + return &tokenReviewV1beta1Client{gw}, nil + + default: + return nil, fmt.Errorf( + "unsupported authentication webhook version %q, supported versions are %q, %q", + version, + authenticationv1.SchemeGroupVersion.Version, + authenticationv1beta1.SchemeGroupVersion.Version, + ) } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) +} + +type tokenReviewV1Client struct { + w *webhook.GenericWebhook +} + +func (t *tokenReviewV1Client) CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { + result := &authenticationv1.TokenReview{} + err := t.w.RestClient.Post().Context(ctx).Body(review).Do().Into(result) + return result, err +} + +type tokenReviewV1beta1Client struct { + w *webhook.GenericWebhook +} + +func (t *tokenReviewV1beta1Client) CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { + v1beta1Review := &authenticationv1beta1.TokenReview{Spec: v1SpecToV1beta1Spec(&review.Spec)} + v1beta1Result := &authenticationv1beta1.TokenReview{} + err := t.w.RestClient.Post().Context(ctx).Body(v1beta1Review).Do().Into(v1beta1Result) if err != nil { return nil, err } - return &tokenReviewClient{gw}, nil + review.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + return review, nil } -type tokenReviewClient struct { - w *webhook.GenericWebhook +func v1SpecToV1beta1Spec(in *authenticationv1.TokenReviewSpec) authenticationv1beta1.TokenReviewSpec { + return authenticationv1beta1.TokenReviewSpec{ + Token: in.Token, + Audiences: in.Audiences, + } } -func (t *tokenReviewClient) Create(tokenReview *authentication.TokenReview) (*authentication.TokenReview, error) { - result := &authentication.TokenReview{} - err := 
t.w.RestClient.Post().Body(tokenReview).Do().Into(result) - return result, err +func v1beta1StatusToV1Status(in *authenticationv1beta1.TokenReviewStatus) authenticationv1.TokenReviewStatus { + return authenticationv1.TokenReviewStatus{ + Authenticated: in.Authenticated, + User: v1beta1UserToV1User(in.User), + Audiences: in.Audiences, + Error: in.Error, + } +} + +func v1beta1UserToV1User(u authenticationv1beta1.UserInfo) authenticationv1.UserInfo { + var extra map[string]authenticationv1.ExtraValue + if u.Extra != nil { + extra = make(map[string]authenticationv1.ExtraValue, len(u.Extra)) + for k, v := range u.Extra { + extra[k] = authenticationv1.ExtraValue(v) + } + } + return authenticationv1.UserInfo{ + Username: u.Username, + UID: u.UID, + Groups: u.Groups, + Extra: extra, + } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD index 270aed93282..2c15fa97631 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD @@ -10,16 +10,20 @@ go_test( name = "go_default_test", srcs = [ "certs_test.go", - "webhook_test.go", + "round_trip_test.go", + "webhook_v1_test.go", + "webhook_v1beta1_test.go", ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/authorization/v1:go_default_library", "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", ], ) @@ -29,6 +33,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook", importpath = "k8s.io/apiserver/plugin/pkg/authorizer/webhook", deps = [ + "//staging/src/k8s.io/api/authorization/v1:go_default_library", "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -37,7 +42,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 52da85980a4..2a0678e8e3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -18,13 +18,15 @@ limitations under the License. 
package webhook import ( + "context" "encoding/json" "fmt" "time" "k8s.io/klog" - authorization "k8s.io/api/authorization/v1beta1" + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" @@ -32,11 +34,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" - authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" -) - -var ( - groupVersions = []schema.GroupVersion{authorization.SchemeGroupVersion} + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" ) const ( @@ -48,8 +46,12 @@ const ( // Ensure Webhook implements the authorizer.Authorizer interface. var _ authorizer.Authorizer = (*WebhookAuthorizer)(nil) +type subjectAccessReviewer interface { + CreateContext(context.Context, *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) +} + type WebhookAuthorizer struct { - subjectAccessReview authorizationclient.SubjectAccessReviewInterface + subjectAccessReview subjectAccessReviewer responseCache *cache.LRUExpireCache authorizedTTL time.Duration unauthorizedTTL time.Duration @@ -58,12 +60,11 @@ type WebhookAuthorizer struct { } // NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client -func NewFromInterface(subjectAccessReview authorizationclient.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL time.Duration) (*WebhookAuthorizer, error) { +func NewFromInterface(subjectAccessReview authorizationv1client.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL time.Duration) (*WebhookAuthorizer, error) { return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff) } // New creates a new WebhookAuthorizer from the provided kubeconfig file. -// // The config's cluster field is used to refer to the remote service, user refers to the returned authorizer. // // # clusters refers to the remote service. @@ -82,8 +83,8 @@ func NewFromInterface(subjectAccessReview authorizationclient.SubjectAccessRevie // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. -func New(kubeConfigFile string, authorizedTTL, unauthorizedTTL time.Duration) (*WebhookAuthorizer, error) { - subjectAccessReview, err := subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile) +func New(kubeConfigFile string, version string, authorizedTTL, unauthorizedTTL time.Duration) (*WebhookAuthorizer, error) { + subjectAccessReview, err := subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile, version) if err != nil { return nil, err } @@ -91,7 +92,7 @@ func New(kubeConfigFile string, authorizedTTL, unauthorizedTTL time.Duration) (* } // newWithBackoff allows tests to skip the sleep. 
-func newWithBackoff(subjectAccessReview authorizationclient.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL, initialBackoff time.Duration) (*WebhookAuthorizer, error) { +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL, initialBackoff time.Duration) (*WebhookAuthorizer, error) { return &WebhookAuthorizer{ subjectAccessReview: subjectAccessReview, responseCache: cache.NewLRUExpireCache(1024), @@ -149,10 +150,10 @@ func newWithBackoff(subjectAccessReview authorizationclient.SubjectAccessReviewI // TODO(mikedanese): We should eventually support failing closed when we // encounter an error. We are failing open now to preserve backwards compatible // behavior. -func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { - r := &authorization.SubjectAccessReview{} +func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { + r := &authorizationv1.SubjectAccessReview{} if user := attr.GetUser(); user != nil { - r.Spec = authorization.SubjectAccessReviewSpec{ + r.Spec = authorizationv1.SubjectAccessReviewSpec{ User: user.GetName(), UID: user.GetUID(), Groups: user.GetGroups(), @@ -161,7 +162,7 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth } if attr.IsResourceRequest() { - r.Spec.ResourceAttributes = &authorization.ResourceAttributes{ + r.Spec.ResourceAttributes = &authorizationv1.ResourceAttributes{ Namespace: attr.GetNamespace(), Verb: attr.GetVerb(), Group: attr.GetAPIGroup(), @@ -171,7 +172,7 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth Name: attr.GetName(), } } else { - r.Spec.NonResourceAttributes = &authorization.NonResourceAttributes{ + r.Spec.NonResourceAttributes = &authorizationv1.NonResourceAttributes{ Path: attr.GetPath(), Verb: attr.GetVerb(), } @@ -181,16 +182,16 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth return w.decisionOnError, "", err } if entry, ok := w.responseCache.Get(string(key)); ok { - r.Status = entry.(authorization.SubjectAccessReviewStatus) + r.Status = entry.(authorizationv1.SubjectAccessReviewStatus) } else { var ( - result *authorization.SubjectAccessReview + result *authorizationv1.SubjectAccessReview err error ) - webhook.WithExponentialBackoff(w.initialBackoff, func() error { - result, err = w.subjectAccessReview.Create(r) + webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { + result, err = w.subjectAccessReview.CreateContext(ctx, r) return err - }) + }, webhook.DefaultShouldRetry) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. 
klog.Errorf("Failed to make webhook authorizer request: %v", err) @@ -228,13 +229,13 @@ func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]author return resourceRules, nonResourceRules, incomplete, fmt.Errorf("webhook authorizer does not support user rule resolution") } -func convertToSARExtra(extra map[string][]string) map[string]authorization.ExtraValue { +func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.ExtraValue { if extra == nil { return nil } - ret := map[string]authorization.ExtraValue{} + ret := map[string]authorizationv1.ExtraValue{} for k, v := range extra { - ret[k] = authorization.ExtraValue(v) + ret[k] = authorizationv1.ExtraValue(v) } return ret @@ -243,32 +244,69 @@ func convertToSARExtra(extra map[string][]string) map[string]authorization.Extra // subjectAccessReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, // and returns a SubjectAccessReviewInterface that uses that client. Note that the client submits SubjectAccessReview // requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. -func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string) (authorizationclient.SubjectAccessReviewInterface, error) { +func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version string) (subjectAccessReviewer, error) { localScheme := runtime.NewScheme() if err := scheme.AddToScheme(localScheme); err != nil { return nil, err } - if err := localScheme.SetVersionPriority(groupVersions...); err != nil { - return nil, err - } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) - if err != nil { - return nil, err + switch version { + case authorizationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1Client{gw}, nil + + case authorizationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1beta1Client{gw}, nil + + default: + return nil, fmt.Errorf( + "unsupported webhook authorizer version %q, supported versions are %q, %q", + version, + authorizationv1.SchemeGroupVersion.Version, + authorizationv1beta1.SchemeGroupVersion.Version, + ) } - return &subjectAccessReviewClient{gw}, nil } -type subjectAccessReviewClient struct { +type subjectAccessReviewV1Client struct { w *webhook.GenericWebhook } -func (t *subjectAccessReviewClient) Create(subjectAccessReview *authorization.SubjectAccessReview) (*authorization.SubjectAccessReview, error) { - result := &authorization.SubjectAccessReview{} - err := t.w.RestClient.Post().Body(subjectAccessReview).Do().Into(result) +func (t *subjectAccessReviewV1Client) CreateContext(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { + result := &authorizationv1.SubjectAccessReview{} + err := 
t.w.RestClient.Post().Context(ctx).Body(subjectAccessReview).Do().Into(result) return result, err } +type subjectAccessReviewV1beta1Client struct { + w *webhook.GenericWebhook +} + +func (t *subjectAccessReviewV1beta1Client) CreateContext(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { + v1beta1Review := &authorizationv1beta1.SubjectAccessReview{Spec: v1SpecToV1beta1Spec(&subjectAccessReview.Spec)} + v1beta1Result := &authorizationv1beta1.SubjectAccessReview{} + err := t.w.RestClient.Post().Context(ctx).Body(v1beta1Review).Do().Into(v1beta1Result) + if err == nil { + subjectAccessReview.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + } + return subjectAccessReview, err +} + // shouldCache determines whether it is safe to cache the given request attributes. If the // requester-controlled attributes are too large, this may be a DoS attempt, so we skip the cache. func shouldCache(attr authorizer.Attributes) bool { @@ -282,3 +320,59 @@ func shouldCache(attr authorizer.Attributes) bool { int64(len(attr.GetPath())) return controlledAttrSize < maxControlledAttrCacheSize } + +func v1beta1StatusToV1Status(in *authorizationv1beta1.SubjectAccessReviewStatus) authorizationv1.SubjectAccessReviewStatus { + return authorizationv1.SubjectAccessReviewStatus{ + Allowed: in.Allowed, + Denied: in.Denied, + Reason: in.Reason, + EvaluationError: in.EvaluationError, + } +} + +func v1SpecToV1beta1Spec(in *authorizationv1.SubjectAccessReviewSpec) authorizationv1beta1.SubjectAccessReviewSpec { + return authorizationv1beta1.SubjectAccessReviewSpec{ + ResourceAttributes: v1ResourceAttributesToV1beta1ResourceAttributes(in.ResourceAttributes), + NonResourceAttributes: v1NonResourceAttributesToV1beta1NonResourceAttributes(in.NonResourceAttributes), + User: in.User, + Groups: in.Groups, + Extra: v1ExtraToV1beta1Extra(in.Extra), + UID: in.UID, + } +} + +func v1ResourceAttributesToV1beta1ResourceAttributes(in *authorizationv1.ResourceAttributes) *authorizationv1beta1.ResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.ResourceAttributes{ + Namespace: in.Namespace, + Verb: in.Verb, + Group: in.Group, + Version: in.Version, + Resource: in.Resource, + Subresource: in.Subresource, + Name: in.Name, + } +} + +func v1NonResourceAttributesToV1beta1NonResourceAttributes(in *authorizationv1.NonResourceAttributes) *authorizationv1beta1.NonResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.NonResourceAttributes{ + Path: in.Path, + Verb: in.Verb, + } +} + +func v1ExtraToV1beta1Extra(in map[string]authorizationv1.ExtraValue) map[string]authorizationv1beta1.ExtraValue { + if in == nil { + return nil + } + ret := make(map[string]authorizationv1beta1.ExtraValue, len(in)) + for k, v := range in { + ret[k] = authorizationv1beta1.ExtraValue(v) + } + return ret +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD index 5a923a9b4f0..a3d8a4520cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD @@ -40,8 +40,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/scheme.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/scheme.go index 4596104d8a6..3168c872cf4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/scheme.go @@ -18,11 +18,11 @@ package dynamic import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) var watchScheme = runtime.NewScheme() @@ -41,37 +41,6 @@ func init() { metav1.AddToGroupVersion(deleteScheme, versionV1) } -var watchJsonSerializerInfo = runtime.SerializerInfo{ - MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, true), - StreamSerializer: &runtime.StreamSerializerInfo{ - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), - Framer: json.Framer, - }, -} - -// watchNegotiatedSerializer is used to read the wrapper of the watch stream -type watchNegotiatedSerializer struct{} - -var watchNegotiatedSerializerInstance = watchNegotiatedSerializer{} - -func (s watchNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{watchJsonSerializerInfo} -} - -func (s watchNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(watchScheme, encoder, nil, gv, nil) -} - -func (s watchNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return versioning.NewDefaultingCodecForScheme(watchScheme, nil, decoder, nil, gv) -} - // basicNegotiatedSerializer is used to handle discovery and error handling serialization type basicNegotiatedSerializer struct{} @@ -82,8 +51,8 @@ func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInf MediaTypeType: "application", MediaTypeSubType: "json", EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, true), + Serializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true), StreamSerializer: &runtime.StreamSerializerInfo{ EncodesAsText: true, Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), @@ -94,9 +63,46 @@ func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInf } func (s 
basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(watchScheme, encoder, nil, gv, nil) + return runtime.WithVersionEncoder{ + Version: gv, + Encoder: encoder, + ObjectTyper: unstructuredTyper{basicScheme}, + } } func (s basicNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return versioning.NewDefaultingCodecForScheme(watchScheme, nil, decoder, nil, gv) + return decoder +} + +type unstructuredCreater struct { + nested runtime.ObjectCreater +} + +func (c unstructuredCreater) New(kind schema.GroupVersionKind) (runtime.Object, error) { + out, err := c.nested.New(kind) + if err == nil { + return out, nil + } + out = &unstructured.Unstructured{} + out.GetObjectKind().SetGroupVersionKind(kind) + return out, nil +} + +type unstructuredTyper struct { + nested runtime.ObjectTyper +} + +func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + kinds, unversioned, err := t.nested.ObjectKinds(obj) + if err == nil { + return kinds, unversioned, nil + } + if _, ok := obj.(runtime.Unstructured); ok && !obj.GetObjectKind().GroupVersionKind().Empty() { + return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil + } + return nil, false, err +} + +func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return true } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go index 4e0ef5a7dca..8a026788aef 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go @@ -18,14 +18,12 @@ package dynamic import ( "fmt" - "io" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" @@ -282,31 +280,10 @@ func (c *dynamicResourceClient) List(opts metav1.ListOptions) (*unstructured.Uns } func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, error) { - internalGV := schema.GroupVersions{ - {Group: c.resource.Group, Version: runtime.APIVersionInternal}, - // always include the legacy group as a decoding target to handle non-error `Status` return types - {Group: "", Version: runtime.APIVersionInternal}, - } - s := &rest.Serializers{ - Encoder: watchNegotiatedSerializerInstance.EncoderForVersion(watchJsonSerializerInfo.Serializer, c.resource.GroupVersion()), - Decoder: watchNegotiatedSerializerInstance.DecoderToVersion(watchJsonSerializerInfo.Serializer, internalGV), - - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - return watchNegotiatedSerializerInstance.DecoderToVersion(watchJsonSerializerInfo.Serializer, internalGV), nil - }, - StreamingSerializer: watchJsonSerializerInfo.StreamSerializer.Serializer, - Framer: watchJsonSerializerInfo.StreamSerializer.Framer, - } - - wrappedDecoderFn := func(body io.ReadCloser) streaming.Decoder { - framer := s.Framer.NewFrameReader(body) - return streaming.NewDecoder(framer, s.StreamingSerializer) - } - opts.Watch = true return c.client.client.Get().AbsPath(c.makeURLSegments("")...). 
SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - WatchWithSpecificDecoders(wrappedDecoderFn, unstructured.UnstructuredJSONScheme) + Watch() } func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD index 156617e794a..2eb50fb94df 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD @@ -27,8 +27,10 @@ go_library( "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/api/events/v1beta1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/api/networking/v1:go_default_library", "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/api/node/v1alpha1:go_default_library", @@ -58,6 +60,7 @@ go_library( "//staging/src/k8s.io/client-go/informers/discovery:go_default_library", "//staging/src/k8s.io/client-go/informers/events:go_default_library", "//staging/src/k8s.io/client-go/informers/extensions:go_default_library", + "//staging/src/k8s.io/client-go/informers/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//staging/src/k8s.io/client-go/informers/networking:go_default_library", "//staging/src/k8s.io/client-go/informers/node:go_default_library", @@ -93,6 +96,7 @@ filegroup( "//staging/src/k8s.io/client-go/informers/discovery:all-srcs", "//staging/src/k8s.io/client-go/informers/events:all-srcs", "//staging/src/k8s.io/client-go/informers/extensions:all-srcs", + "//staging/src/k8s.io/client-go/informers/flowcontrol:all-srcs", "//staging/src/k8s.io/client-go/informers/internalinterfaces:all-srcs", "//staging/src/k8s.io/client-go/informers/networking:all-srcs", "//staging/src/k8s.io/client-go/informers/node:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/BUILD index df3ac630f35..9a187d17dde 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/client-go/informers/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/informers/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", ], ) @@ -24,6 +25,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/client-go/informers/discovery/v1alpha1:all-srcs", + "//staging/src/k8s.io/client-go/informers/discovery/v1beta1:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/interface.go index eb8062feec7..c0cae3314a3 100644 --- 
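
With the bespoke watch serializer removed in dynamic/simple.go above, a dynamic-client watch is just a versioned GET with watch=true, decoded by the client's standard unstructured machinery. A minimal usage sketch follows; cfg, the "default" namespace, and the ConfigMap GVR are assumptions, not part of the patch.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// watchConfigMaps assumes cfg was built elsewhere (kubeconfig or in-cluster).
func watchConfigMaps(cfg *rest.Config) error {
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return err
	}
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
	// In this client-go generation Watch takes bare metav1.ListOptions; the
	// resulting events carry unstructured objects.
	w, err := client.Resource(gvr).Namespace("default").Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type, ev.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}
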
a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/interface.go @@ -20,6 +20,7 @@ package discovery import ( v1alpha1 "k8s.io/client-go/informers/discovery/v1alpha1" + v1beta1 "k8s.io/client-go/informers/discovery/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -27,6 +28,8 @@ import ( type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/BUILD new file mode 100644 index 00000000000..ec7b58f8868 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "endpointslice.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/discovery/v1beta1", + importpath = "k8s.io/client-go/informers/discovery/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000000..f658866c2a7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/discovery/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointSliceInformer provides access to a shared informer and lister for +// EndpointSlices. +type EndpointSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.EndpointSliceLister +} + +type endpointSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1beta1().EndpointSlices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(options) + }, + }, + &discoveryv1beta1.EndpointSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&discoveryv1beta1.EndpointSlice{}, f.defaultInformer) +} + +func (f *endpointSliceInformer) Lister() v1beta1.EndpointSliceLister { + return v1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go new file mode 100644 index 00000000000..4661646e013 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EndpointSlices returns a EndpointSliceInformer. + EndpointSlices() EndpointSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EndpointSlices returns a EndpointSliceInformer. +func (v *version) EndpointSlices() EndpointSliceInformer { + return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go index c5230a491d3..dbbc8f5e9ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go @@ -37,6 +37,7 @@ import ( discovery "k8s.io/client-go/informers/discovery" events "k8s.io/client-go/informers/events" extensions "k8s.io/client-go/informers/extensions" + flowcontrol "k8s.io/client-go/informers/flowcontrol" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" networking "k8s.io/client-go/informers/networking" node "k8s.io/client-go/informers/node" @@ -200,6 +201,7 @@ type SharedInformerFactory interface { Discovery() discovery.Interface Events() events.Interface Extensions() extensions.Interface + Flowcontrol() flowcontrol.Interface Networking() networking.Interface Node() node.Interface Policy() policy.Interface @@ -253,6 +255,10 @@ func (f *sharedInformerFactory) Extensions() extensions.Interface { return extensions.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Flowcontrol() flowcontrol.Interface { + return flowcontrol.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Networking() networking.Interface { return networking.New(f, f.namespace, f.tweakListOptions) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/BUILD new file mode 100644 index 00000000000..0166429fa75 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/flowcontrol", + importpath = "k8s.io/client-go/informers/flowcontrol", + visibility = 
["//visibility:public"], + deps = [ + "//staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/interface.go new file mode 100644 index 00000000000..27e68efe833 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package flowcontrol + +import ( + v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/BUILD new file mode 100644 index 00000000000..742eaf7a406 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "flowschema.go", + "interface.go", + "prioritylevelconfiguration.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1", + importpath = "k8s.io/client-go/informers/flowcontrol/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000000..af1d874c72f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. 
+type FlowSchemaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.FlowSchemaLister +} + +type flowSchemaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().FlowSchemas().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().FlowSchemas().Watch(options) + }, + }, + &flowcontrolv1alpha1.FlowSchema{}, + resyncPeriod, + indexers, + ) +} + +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1alpha1.FlowSchema{}, f.defaultInformer) +} + +func (f *flowSchemaInformer) Lister() v1alpha1.FlowSchemaLister { + return v1alpha1.NewFlowSchemaLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go new file mode 100644 index 00000000000..7097c0058bf --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. 
+type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. + FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000000..c145b7d4114 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. +type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(options) + }, + }, + &flowcontrolv1alpha1.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1alpha1.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1alpha1.PriorityLevelConfigurationLister { + return v1alpha1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go index 58f39460c26..8e6df2461a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go @@ -38,8 +38,10 @@ import ( coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -207,6 +209,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case discoveryv1alpha1.SchemeGroupVersion.WithResource("endpointslices"): return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1alpha1().EndpointSlices().Informer()}, nil + // Group=discovery.k8s.io, Version=v1beta1 + case discoveryv1beta1.SchemeGroupVersion.WithResource("endpointslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1beta1().EndpointSlices().Informer()}, nil + // Group=events.k8s.io, Version=v1beta1 case eventsv1beta1.SchemeGroupVersion.WithResource("events"): return &genericInformer{resource: 
resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil @@ -225,6 +231,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil + // Group=networking.k8s.io, Version=v1 case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil @@ -294,6 +306,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil // Group=storage.k8s.io, Version=v1 + case storagev1.SchemeGroupVersion.WithResource("csinodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSINodes().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD index 25f3894667f..e0d5df88a19 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = [ + "csinode.go", "interface.go", "storageclass.go", "volumeattachment.go", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go new file mode 100644 index 00000000000..eed947c4a6f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" +) + +// CSINodeInformer provides access to a shared informer and lister for +// CSINodes. +type CSINodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CSINodeLister +} + +type cSINodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCSINodeInformer constructs a new informer for CSINode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCSINodeInformer constructs a new informer for CSINode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().CSINodes().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().CSINodes().Watch(options) + }, + }, + &storagev1.CSINode{}, + resyncPeriod, + indexers, + ) +} + +func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1.CSINode{}, f.defaultInformer) +} + +func (f *cSINodeInformer) Lister() v1.CSINodeLister { + return v1.NewCSINodeLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go index 64fc2bd8436..59f367d3399 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -24,6 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // CSINodes returns a CSINodeInformer. + CSINodes() CSINodeInformer // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. 
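
The generated accessors above expose CSINode through the v1 storage group of the shared informer factory. A short sketch of the usual pattern follows; clientset, stopCh, and the 10-minute resync are assumptions supplied by the caller, not values from the patch.

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// listCSINodes assumes clientset and stopCh are provided by the caller.
func listCSINodes(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)

	// Grabbing the lister before Start registers the CSINode informer
	// (new to the v1 storage group in this bump) with the factory.
	csiNodeLister := factory.Storage().V1().CSINodes().Lister()

	factory.Start(stopCh)
	for typ, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			return fmt.Errorf("cache for %v failed to sync", typ)
		}
	}

	nodes, err := csiNodeLister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, n := range nodes {
		fmt.Println(n.Name, len(n.Spec.Drivers))
	}
	return nil
}
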
@@ -41,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CSINodes returns a CSINodeInformer. +func (v *version) CSINodes() CSINodeInformer { + return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // StorageClasses returns a StorageClassInformer. func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD index c3c79bf9c99..6858c78d879 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD @@ -33,8 +33,10 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1:go_default_library", @@ -89,8 +91,10 @@ filegroup( "//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go index f8a237f614d..cf98b05002e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -43,8 +43,10 @@ import ( coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 
"k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" @@ -87,8 +89,10 @@ type Interface interface { CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface + DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface @@ -131,8 +135,10 @@ type Clientset struct { coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client + discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client @@ -255,6 +261,11 @@ func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Inter return c.discoveryV1alpha1 } +// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client +func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { + return c.discoveryV1beta1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 @@ -265,6 +276,11 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } +// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client +func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { + return c.flowcontrolV1alpha1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -445,6 +461,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -453,6 +473,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -546,8 +570,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c) + cs.discoveryV1beta1 = discoveryv1beta1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) + cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) @@ 
-592,8 +618,10 @@ func New(c rest.Interface) *Clientset { cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) cs.discoveryV1alpha1 = discoveryv1alpha1.New(c) + cs.discoveryV1beta1 = discoveryv1beta1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) + cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD index 23598a3490b..4753a9e10fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD @@ -32,8 +32,10 @@ go_library( "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/api/events/v1beta1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/api/networking/v1:go_default_library", "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/api/node/v1alpha1:go_default_library", @@ -100,10 +102,14 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index e94ed73feae..ccb103cc3cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -66,10 +66,14 @@ import ( fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" fakediscoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake" + discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" + fakediscoveryv1beta1 
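
The Clientset wiring above makes the two new groups reachable alongside the existing typed clients. A usage sketch, assuming the clientset is built from a kubeconfig (the path and the "default" namespace are illustrative); List still takes bare metav1.ListOptions in this client-go generation.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func listNewGroups() error {
	// Illustrative kubeconfig path; in-cluster config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// discovery.k8s.io/v1beta1, added by this vendor bump.
	slices, err := clientset.DiscoveryV1beta1().EndpointSlices("default").List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	// flowcontrol.apiserver.k8s.io/v1alpha1, also new here.
	schemas, err := clientset.FlowcontrolV1alpha1().FlowSchemas().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d endpoint slices, %d flow schemas\n", len(slices.Items), len(schemas.Items))
	return nil
}
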
"k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" + flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" @@ -255,6 +259,11 @@ func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Inter return &fakediscoveryv1alpha1.FakeDiscoveryV1alpha1{Fake: &c.Fake} } +// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client +func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { + return &fakediscoveryv1beta1.FakeDiscoveryV1beta1{Fake: &c.Fake} +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} @@ -265,6 +274,11 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } +// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client +func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { + return &fakeflowcontrolv1alpha1.FakeFlowcontrolV1alpha1{Fake: &c.Fake} +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go index 07d44565220..e88b99891d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -40,8 +40,10 @@ import ( coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -89,8 +91,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1.AddToScheme, corev1.AddToScheme, discoveryv1alpha1.AddToScheme, + discoveryv1beta1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, + flowcontrolv1alpha1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, nodev1alpha1.AddToScheme, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD index a9913350112..a78e6215750 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD @@ -31,8 +31,10 @@ go_library( "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/api/events/v1beta1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/api/networking/v1:go_default_library", "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/api/node/v1alpha1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go index c1a2c5198a0..4d8e8b7f780 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -40,8 +40,10 @@ import ( coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -89,8 +91,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1.AddToScheme, corev1.AddToScheme, discoveryv1alpha1.AddToScheme, + discoveryv1beta1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, + flowcontrolv1alpha1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, nodev1alpha1.AddToScheme, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go index 7008c927cd4..610948ef104 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authenticationapi "k8s.io/api/authentication/v1" core "k8s.io/client-go/testing" ) func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *FakeTokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) return obj.(*authenticationapi.TokenReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go index ea21f1b4a2b..8a21b7c7641 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authenticationapi "k8s.io/api/authentication/v1" ) type TokenReviewExpansion interface { Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) + CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) } func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { result = &authenticationapi.TokenReview{} err = c.client.Post(). + Context(ctx). Resource("tokenreviews"). Body(tokenReview). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go index 92ef5d1a158..f9c487c3d8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authenticationapi "k8s.io/api/authentication/v1beta1" core "k8s.io/client-go/testing" ) func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *FakeTokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) return obj.(*authenticationapi.TokenReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go index 8f186fa76ae..0476b173594 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authenticationapi "k8s.io/api/authentication/v1beta1" ) type TokenReviewExpansion interface { Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) + CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) } func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { result = &authenticationapi.TokenReview{} err = c.client.Post(). + Context(ctx). Resource("tokenreviews"). Body(tokenReview). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go index a01e415c8fa..59c8773774c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeLocalSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) return obj.(*authorizationapi.LocalSubjectAccessReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go index 91acbe029e7..d3ee5a6a8bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeSelfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) return obj.(*authorizationapi.SelfSubjectAccessReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go index a6dc9513498..06f1cd69174 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *FakeSelfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) return obj.(*authorizationapi.SelfSubjectRulesReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go index a2a2f0697ec..6e3f3b45b35 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go index 0c123b07ce5..9836308bd6e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type LocalSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) } func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { result = &authorizationapi.LocalSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Namespace(c.ns). 
Resource("localsubjectaccessreviews"). Body(sar). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go index 5b70a27dd62..916e5b43f04 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type SelfSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) } func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { result = &authorizationapi.SelfSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectaccessreviews"). Body(sar). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go index e2cad880eb1..365282ed865 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type SelfSubjectRulesReviewExpansion interface { Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) + CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) } func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { result = &authorizationapi.SelfSubjectRulesReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectrulesreviews"). Body(srr). Do(). 
diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go index b5ed87d3015..927544f127f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -17,17 +17,25 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. type SubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) } func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { result = &authorizationapi.SubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("subjectaccessreviews"). Body(sar). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go index 5211628f26e..f8580d28a8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeLocalSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) return obj.(*authorizationapi.LocalSubjectAccessReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go index 6e3af12a789..cf1fe787007 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeSelfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) return obj.(*authorizationapi.SelfSubjectAccessReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go index f92ffd717de..27410b81cd6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. 
package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *FakeSelfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) return obj.(*authorizationapi.SelfSubjectRulesReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go index b0b18b099c0..721c5963c69 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go @@ -17,11 +17,17 @@ limitations under the License. package fake import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *FakeSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) return obj.(*authorizationapi.SubjectAccessReview), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go index bf1b8a5f108..148cf62823b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. 
package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type LocalSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) } func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { result = &authorizationapi.LocalSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Namespace(c.ns). Resource("localsubjectaccessreviews"). Body(sar). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go index 58fecfd85bf..6edead0e77d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type SelfSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) } func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { result = &authorizationapi.SelfSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectaccessreviews"). Body(sar). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go index 5f1f37ef7ee..a459d5c3eae 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. 
package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type SelfSubjectRulesReviewExpansion interface { Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) + CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) } func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { result = &authorizationapi.SelfSubjectRulesReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectrulesreviews"). Body(srr). Do(). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go index 4f93689e8ac..7072e29ca49 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -17,17 +17,25 @@ limitations under the License. package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. type SubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) } func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { result = &authorizationapi.SubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("subjectaccessreviews"). Body(sar). Do(). 
diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 6929ade1ddf..5a82afa42f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -104,17 +104,17 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if e.ns != "" && ref.Namespace != e.ns { + if len(e.ns) > 0 && ref.Namespace != e.ns { return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) } stringRefKind := string(ref.Kind) var refKind *string - if stringRefKind != "" { + if len(stringRefKind) > 0 { refKind = &stringRefKind } stringRefUID := string(ref.UID) var refUID *string - if stringRefUID != "" { + if len(stringRefUID) > 0 { refUID = &stringRefUID } fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) @@ -124,10 +124,9 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev // Returns the appropriate field selector based on the API version being used to communicate with the server. // The returned field selector can be used with List and Watch to filter desired events. func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() field := fields.Set{} if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + field["involvedObject.name"] = *involvedObjectName } if involvedObjectNamespace != nil { field["involvedObject.namespace"] = *involvedObjectNamespace @@ -142,6 +141,7 @@ func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, i } // Returns the appropriate field label to use for name of the involved object as per the given API version. +// DEPRECATED: please use "involvedObject.name" inline. 
func GetInvolvedObjectNameFieldLabel(version string) string { return "involvedObject.name" } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/BUILD new file mode 100644 index 00000000000..a197cd4c321 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "discovery_client.go", + "doc.go", + "endpointslice.go", + "generated_expansion.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1", + importpath = "k8s.io/client-go/kubernetes/typed/discovery/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go new file mode 100644 index 00000000000..997239a9508 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1beta1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1beta1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1beta1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1beta1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1beta1Client { + return &DiscoveryV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go similarity index 73% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/doc.go rename to cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go index b07da628d98..771101956f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package dbus provides an injectable interface and implementations for D-Bus communication -package dbus // import "k8s.io/kubernetes/pkg/util/dbus" +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000000..bba656b8fd8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) + Update(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/BUILD new file mode 100644 index 00000000000..d9a82c88e9b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_discovery_client.go", + "fake_endpointslice.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go new file mode 100644 index 00000000000..16f44399065 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go new file mode 100644 index 00000000000..e285de64764 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeDiscoveryV1beta1 struct { + *testing.Fake +} + +func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface { + return &FakeEndpointSlices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeDiscoveryV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go new file mode 100644 index 00000000000..ba13afea414 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEndpointSlices implements EndpointSliceInterface +type FakeEndpointSlices struct { + Fake *FakeDiscoveryV1beta1 + ns string +} + +var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"} + +var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSlice"} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta} + for _, item := range obj.(*v1beta1.EndpointSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *FakeEndpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) + return err +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *FakeEndpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go new file mode 100644 index 00000000000..1e7769f2770 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type EndpointSliceExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/BUILD new file mode 100644 index 00000000000..33015e81ee5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "flowcontrol_client.go", + "flowschema.go", + "generated_expansion.go", + "prioritylevelconfiguration.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1", + importpath = "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go new file mode 100644 index 00000000000..df51baa4d4c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/BUILD new file mode 100644 index 00000000000..db56b4ec142 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_flowcontrol_client.go", + "fake_flowschema.go", + "fake_prioritylevelconfiguration.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..16f44399065 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go new file mode 100644 index 00000000000..72d5eff0c26 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeFlowcontrolV1alpha1 struct { + *testing.Fake +} + +func (c *FakeFlowcontrolV1alpha1) FlowSchemas() v1alpha1.FlowSchemaInterface { + return &FakeFlowSchemas{c} +} + +func (c *FakeFlowcontrolV1alpha1) PriorityLevelConfigurations() v1alpha1.PriorityLevelConfigurationInterface { + return &FakePriorityLevelConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFlowcontrolV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go new file mode 100644 index 00000000000..d27f802e1b2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFlowSchemas implements FlowSchemaInterface +type FakeFlowSchemas struct { + Fake *FakeFlowcontrolV1alpha1 +} + +var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "flowschemas"} + +var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "FlowSchema"} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *FakeFlowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FlowSchema), err +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
+func (c *FakeFlowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1alpha1.FlowSchemaList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.FlowSchemaList{ListMeta: obj.(*v1alpha1.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1alpha1.FlowSchemaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *FakeFlowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FlowSchema), err +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FlowSchema), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFlowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1alpha1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FlowSchema), err +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *FakeFlowSchemas) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFlowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.FlowSchemaList{}) + return err +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *FakeFlowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1alpha1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FlowSchema), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go new file mode 100644 index 00000000000..960481c2863 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type FakePriorityLevelConfigurations struct { + Fake *FakeFlowcontrolV1alpha1 +} + +var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "prioritylevelconfigurations"} + +var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "PriorityLevelConfiguration"} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *FakePriorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityLevelConfiguration), err +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *FakePriorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1alpha1.PriorityLevelConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PriorityLevelConfigurationList{ListMeta: obj.(*v1alpha1.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1alpha1.PriorityLevelConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *FakePriorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityLevelConfiguration), err +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityLevelConfiguration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePriorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityLevelConfiguration), err +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *FakePriorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakePriorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PriorityLevelConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *FakePriorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1alpha1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityLevelConfiguration), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go new file mode 100644 index 00000000000..37a1ff2d7b3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1alpha1Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1alpha1Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FlowcontrolV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *FlowcontrolV1alpha1Client { + return &FlowcontrolV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000000..db71d29a571 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + Update(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + UpdateStatus(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.FlowSchema, error) + List(opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + Body(flowSchema). + Do(). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + Body(flowSchema). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *flowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + Body(flowSchema). + Do(). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..065b5e6b42d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000000..eb99cca602c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + Update(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + UpdateStatus(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) + List(opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. 
Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *priorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD index 5c3b8bab524..6cefd379d36 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = [ + "csinode.go", "doc.go", "generated_expansion.go", "storage_client.go", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go new file mode 100644 index 00000000000..209054175aa --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSINodesGetter has a method to return a CSINodeInterface. +// A group's client should implement this interface. +type CSINodesGetter interface { + CSINodes() CSINodeInterface +} + +// CSINodeInterface has methods to work with CSINode resources. +type CSINodeInterface interface { + Create(*v1.CSINode) (*v1.CSINode, error) + Update(*v1.CSINode) (*v1.CSINode, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.CSINode, error) + List(opts metav1.ListOptions) (*v1.CSINodeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) + CSINodeExpansion +} + +// cSINodes implements CSINodeInterface +type cSINodes struct { + client rest.Interface +} + +// newCSINodes returns a CSINodes +func newCSINodes(c *StorageV1Client) *cSINodes { + return &cSINodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *cSINodes) Get(name string, options metav1.GetOptions) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Get(). + Resource("csinodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CSINodeList{} + err = c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Create(cSINode *v1.CSINode) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Post(). + Resource("csinodes"). + Body(cSINode). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSINode and updates it. 
Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Update(cSINode *v1.CSINode) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Put(). + Resource("csinodes"). + Name(cSINode.Name). + Body(cSINode). + Do(). + Into(result) + return +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. +func (c *cSINodes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("csinodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSINodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csinodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSINode. +func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Patch(pt). + Resource("csinodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD index 9bdd5a4edea..ba04417b9b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD @@ -9,6 +9,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "fake_csinode.go", "fake_storage_client.go", "fake_storageclass.go", "fake_volumeattachment.go", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go new file mode 100644 index 00000000000..87ce349a5c4 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + storagev1 "k8s.io/api/storage/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCSINodes implements CSINodeInterface +type FakeCSINodes struct { + Fake *FakeStorageV1 +} + +var csinodesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "csinodes"} + +var csinodesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSINode"} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *FakeCSINodes) Get(name string, options v1.GetOptions) (result *storagev1.CSINode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(csinodesResource, name), &storagev1.CSINode{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.CSINode), err +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *FakeCSINodes) List(opts v1.ListOptions) (result *storagev1.CSINodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &storagev1.CSINodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &storagev1.CSINodeList{ListMeta: obj.(*storagev1.CSINodeList).ListMeta} + for _, item := range obj.(*storagev1.CSINodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *FakeCSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *FakeCSINodes) Create(cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &storagev1.CSINode{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.CSINode), err +} + +// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *FakeCSINodes) Update(cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &storagev1.CSINode{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.CSINode), err +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. +func (c *FakeCSINodes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(csinodesResource, name), &storagev1.CSINode{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(csinodesResource, listOptions) + + _, err := c.Fake.Invokes(action, &storagev1.CSINodeList{}) + return err +} + +// Patch applies the patch and returns the patched cSINode. +func (c *FakeCSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.CSINode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &storagev1.CSINode{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.CSINode), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index 967a528500e..f1c37a787e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -28,6 +28,10 @@ type FakeStorageV1 struct { *testing.Fake } +func (c *FakeStorageV1) CSINodes() v1.CSINodeInterface { + return &FakeCSINodes{c} +} + func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { return &FakeStorageClasses{c} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index ccac16114c8..d147620aef7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -18,6 +18,8 @@ limitations under the License. 
package v1 +type CSINodeExpansion interface{} + type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 1afbe93c9ba..822f0891446 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -26,6 +26,7 @@ import ( type StorageV1Interface interface { RESTClient() rest.Interface + CSINodesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -35,6 +36,10 @@ type StorageV1Client struct { restClient rest.Interface } +func (c *StorageV1Client) CSINodes() CSINodeInterface { + return newCSINodes(c) +} + func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/BUILD new file mode 100644 index 00000000000..3b5ac4b82da --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "endpointslice.go", + "expansion_generated.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/listers/discovery/v1beta1", + importpath = "k8s.io/client-go/listers/discovery/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000000..e7d1026ab72 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceLister helps list EndpointSlices. +type EndpointSliceLister interface { + // List lists all EndpointSlices in the indexer. 
+ List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + // EndpointSlices returns an object that can list and get EndpointSlices. + EndpointSlices(namespace string) EndpointSliceNamespaceLister + EndpointSliceListerExpansion +} + +// endpointSliceLister implements the EndpointSliceLister interface. +type endpointSliceLister struct { + indexer cache.Indexer +} + +// NewEndpointSliceLister returns a new EndpointSliceLister. +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { + return &endpointSliceLister{indexer: indexer} +} + +// List lists all EndpointSlices in the indexer. +func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.EndpointSlice)) + }) + return ret, err +} + +// EndpointSlices returns an object that can list and get EndpointSlices. +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { + return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointSliceNamespaceLister helps list and get EndpointSlices. +type EndpointSliceNamespaceLister interface { + // List lists all EndpointSlices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + // Get retrieves the EndpointSlice from the indexer for a given namespace and name. + Get(name string) (*v1beta1.EndpointSlice, error) + EndpointSliceNamespaceListerExpansion +} + +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister +// interface. +type endpointSliceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EndpointSlices in the indexer for a given namespace. +func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.EndpointSlice)) + }) + return ret, err +} + +// Get retrieves the EndpointSlice from the indexer for a given namespace and name. +func (s endpointSliceNamespaceLister) Get(name string) (*v1beta1.EndpointSlice, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("endpointslice"), name) + } + return obj.(*v1beta1.EndpointSlice), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go new file mode 100644 index 00000000000..9619bbd8dd7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +// EndpointSliceListerExpansion allows custom methods to be added to +// EndpointSliceLister. +type EndpointSliceListerExpansion interface{} + +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to +// EndpointSliceNamespaceLister. +type EndpointSliceNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/BUILD new file mode 100644 index 00000000000..40d01b283aa --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "expansion_generated.go", + "flowschema.go", + "prioritylevelconfiguration.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1", + importpath = "k8s.io/client-go/listers/flowcontrol/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..3e74051681c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. +type PriorityLevelConfigurationListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000000..b6791336f92 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + Get(name string) (*v1alpha1.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1alpha1.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("flowschema"), name) + } + return obj.(*v1alpha1.FlowSchema), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000000..cb02129addc --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. 
+ Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. +func (s *priorityLevelConfigurationLister) Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1alpha1.PriorityLevelConfiguration), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD index 495e3300fc1..e5257caa5fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = [ + "csinode.go", "expansion_generated.go", "storageclass.go", "volumeattachment.go", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/csinode.go new file mode 100644 index 00000000000..577f7285c9e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CSINodeLister helps list CSINodes. +type CSINodeLister interface { + // List lists all CSINodes in the indexer. + List(selector labels.Selector) (ret []*v1.CSINode, err error) + // Get retrieves the CSINode from the index for a given name. + Get(name string) (*v1.CSINode, error) + CSINodeListerExpansion +} + +// cSINodeLister implements the CSINodeLister interface. +type cSINodeLister struct { + indexer cache.Indexer +} + +// NewCSINodeLister returns a new CSINodeLister. 
+func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { + return &cSINodeLister{indexer: indexer} +} + +// List lists all CSINodes in the indexer. +func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CSINode)) + }) + return ret, err +} + +// Get retrieves the CSINode from the index for a given name. +func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("csinode"), name) + } + return obj.(*v1.CSINode), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go index 9d7d8887265..41efa3245a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -18,6 +18,10 @@ limitations under the License. package v1 +// CSINodeListerExpansion allows custom methods to be added to +// CSINodeLister. +type CSINodeListerExpansion interface{} + // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/BUILD index 705fdcec2d8..1ec6687a5aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/BUILD @@ -10,7 +10,7 @@ go_library( importpath = "k8s.io/client-go/metadata", visibility = ["//visibility:public"], deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go index db06cfd5e68..a94fe7a43f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go @@ -23,7 +23,7 @@ import ( "k8s.io/klog" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -63,7 +63,7 @@ func ConfigFor(inConfig *rest.Config) *rest.Config { config := rest.CopyConfig(inConfig) config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" config.ContentType = "application/vnd.kubernetes.protobuf" - config.NegotiatedSerializer = metainternalversion.Codecs.WithoutConversion() + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 94ef4b7337b..0e533e46576 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -51,11 +51,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/def.bzl b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/def.bzl index 9c018a4ef74..ecc9cd3bb09 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/def.bzl +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/def.bzl @@ -16,7 +16,7 @@ def version_x_defs(): # This should match the list of packages in kube::version::ldflag stamp_pkgs = [ - "k8s.io/kubernetes/pkg/version", + "k8s.io/component-base/version", # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here? "k8s.io/client-go/pkg/version", ] diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index b88902c1031..741729bb5d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -48,6 +48,7 @@ import ( ) const execInfoEnv = "KUBERNETES_EXEC_INFO" +const onRotateListWarningLength = 1000 var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) @@ -164,7 +165,7 @@ type Authenticator struct { cachedCreds *credentials exp time.Time - onRotate func() + onRotateList []func() } type credentials struct { @@ -191,7 +192,15 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext } d := connrotation.NewDialer(dial) - a.onRotate = d.CloseAll + + a.mu.Lock() + defer a.mu.Unlock() + a.onRotateList = append(a.onRotateList, d.CloseAll) + onRotateListLength := len(a.onRotateList) + if onRotateListLength > onRotateListWarningLength { + klog.Warningf("constructing many client instances from the same exec auth config can cause performance problems during cert rotation and can exhaust available network connections; %d clients constructed calling %q", onRotateListLength, a.cmd) + } + c.Dial = d.DialContext return nil @@ -353,8 +362,10 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err a.cachedCreds = newCreds // Only close all connections when TLS cert rotates. Token rotation doesn't // need the extra noise. 
- if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { - a.onRotate() + if len(a.onRotateList) > 0 && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { + for _, onRotate := range a.onRotateList { + onRotate() + } } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/rest/OWNERS index 49dabc61b00..c02ec6a250f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/OWNERS @@ -20,7 +20,6 @@ reviewers: - resouer - cjcullen - rmmh -- lixiaobing10051267 - asalkeld - juanvallejo - lojies diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go index 927403cb232..53c6abd3815 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go @@ -17,8 +17,6 @@ limitations under the License. package rest import ( - "fmt" - "mime" "net/http" "net/url" "os" @@ -51,6 +49,28 @@ type Interface interface { APIVersion() schema.GroupVersion } +// ClientContentConfig controls how RESTClient communicates with the server. +// +// TODO: ContentConfig will be updated to accept a Negotiator instead of a +// NegotiatedSerializer and NegotiatedSerializer will be removed. +type ClientContentConfig struct { + // AcceptContentTypes specifies the types the client will accept and is optional. + // If not set, ContentType will be used to define the Accept header + AcceptContentTypes string + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server if + // AcceptContentTypes is not set, and as the default content type on any object + // sent to the server. If not set, "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. This is used as the default group version for VersionedParams. + GroupVersion schema.GroupVersion + // Negotiator is used for obtaining encoders and decoders for multiple + // supported media types. + Negotiator runtime.ClientNegotiator +} + // RESTClient imposes common Kubernetes API conventions on a set of resource paths. // The baseURL is expected to point to an HTTP or HTTPS path that is the parent // of one or more resources. The server should return a decodable API resource @@ -64,34 +84,27 @@ type RESTClient struct { // versionedAPIPath is a path segment connecting the base URL to the resource root versionedAPIPath string - // contentConfig is the information used to communicate with the server. - contentConfig ContentConfig - - // serializers contain all serializers for underlying content type. - serializers Serializers + // content describes how a RESTClient encodes and decodes responses. + content ClientContentConfig // creates BackoffManager that is passed to requests. createBackoffMgr func() BackoffManager - // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. - Throttle flowcontrol.RateLimiter + // rateLimiter is shared among all requests created by this client unless specifically + // overridden. + rateLimiter flowcontrol.RateLimiter // Set specific behavior of the client. If not set http.DefaultClient will be used. 
Client *http.Client } -type Serializers struct { - Encoder runtime.Encoder - Decoder runtime.Decoder - StreamingSerializer runtime.Serializer - Framer runtime.Framer - RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) -} - // NewRESTClient creates a new RESTClient. This client performs generic REST functions -// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and -// decoding of responses from the server. -func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { +// such as Get, Put, Post, and Delete on specified paths. +func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" @@ -99,31 +112,14 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConf base.RawQuery = "" base.Fragment = "" - if config.GroupVersion == nil { - config.GroupVersion = &schema.GroupVersion{} - } - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - serializers, err := createSerializers(config) - if err != nil { - return nil, err - } - - var throttle flowcontrol.RateLimiter - if maxQPS > 0 && rateLimiter == nil { - throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) - } else if rateLimiter != nil { - throttle = rateLimiter - } return &RESTClient{ base: &base, versionedAPIPath: versionedAPIPath, - contentConfig: config, - serializers: *serializers, + content: config, createBackoffMgr: readExpBackoffConfig, - Throttle: throttle, - Client: client, + rateLimiter: rateLimiter, + + Client: client, }, nil } @@ -132,7 +128,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { if c == nil { return nil } - return c.Throttle + return c.rateLimiter } // readExpBackoffConfig handles the internal logic of determining what the @@ -153,58 +149,6 @@ func readExpBackoffConfig() BackoffManager { time.Duration(backoffDurationInt)*time.Second)} } -// createSerializers creates all necessary serializers for given contentType. -// TODO: the negotiated serializer passed to this method should probably return -// serializers that control decoding and versioning without this package -// being aware of the types. Depends on whether RESTClient must deal with -// generic infrastructure. 
-func createSerializers(config ContentConfig) (*Serializers, error) { - mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() - contentType := config.ContentType - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) - } - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, fmt.Errorf("no serializers registered for %s", contentType) - } - info = mediaTypes[0] - } - - internalGV := schema.GroupVersions{ - { - Group: config.GroupVersion.Group, - Version: runtime.APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: runtime.APIVersionInternal, - }, - } - - s := &Serializers{ - Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), - Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), - - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil - }, - } - if info.StreamSerializer != nil { - s.StreamingSerializer = info.StreamSerializer.Serializer - s.Framer = info.StreamSerializer.Framer - } - - return s, nil -} - // Verb begins a request with a verb (GET, POST, PUT, DELETE). // // Example usage of RESTClient's request building interface: @@ -219,12 +163,7 @@ func createSerializers(config ContentConfig) (*Serializers, error) { // list, ok := resp.(*api.PodList) // func (c *RESTClient) Verb(verb string) *Request { - backoff := c.createBackoffMgr() - - if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0) - } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout) + return NewRequest(c).Verb(verb) } // Post begins a POST request. Short for c.Verb("POST"). @@ -254,5 +193,5 @@ func (c *RESTClient) Delete() *Request { // APIVersion returns the APIVersion this RESTClient is expected to use. func (c *RESTClient) APIVersion() schema.GroupVersion { - return *c.contentConfig.GroupVersion + return c.content.GroupVersion } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go index fb81fb7b1cf..f58f518303b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go @@ -269,6 +269,9 @@ type ContentConfig struct { GroupVersion *schema.GroupVersion // NegotiatedSerializer is used for obtaining encoders and decoders for multiple // supported media types. + // + // TODO: NegotiatedSerializer will be phased out as internal clients are removed + // from Kubernetes. 
NegotiatedSerializer runtime.NegotiatedSerializer } @@ -283,14 +286,6 @@ func RESTClientFor(config *Config) (*RESTClient, error) { if config.NegotiatedSerializer == nil { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - qps := config.QPS - if config.QPS == 0.0 { - qps = DefaultQPS - } - burst := config.Burst - if config.Burst == 0 { - burst = DefaultBurst - } baseURL, versionedAPIPath, err := defaultServerUrlFor(config) if err != nil { @@ -310,7 +305,33 @@ func RESTClientFor(config *Config) (*RESTClient, error) { } } - return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) + rateLimiter := config.RateLimiter + if rateLimiter == nil { + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + if qps > 0 { + rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst) + } + } + + var gv schema.GroupVersion + if config.GroupVersion != nil { + gv = *config.GroupVersion + } + clientContent := ClientContentConfig{ + AcceptContentTypes: config.AcceptContentTypes, + ContentType: config.ContentType, + GroupVersion: gv, + Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv), + } + + return NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) } // UnversionedRESTClientFor is the same as RESTClientFor, except that it allows @@ -338,13 +359,33 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { } } - versionConfig := config.ContentConfig - if versionConfig.GroupVersion == nil { - v := metav1.SchemeGroupVersion - versionConfig.GroupVersion = &v + rateLimiter := config.RateLimiter + if rateLimiter == nil { + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + if qps > 0 { + rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst) + } + } + + gv := metav1.SchemeGroupVersion + if config.GroupVersion != nil { + gv = *config.GroupVersion + } + clientContent := ClientContentConfig{ + AcceptContentTypes: config.AcceptContentTypes, + ContentType: config.ContentType, + GroupVersion: gv, + Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv), } - return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) + return NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) } // SetKubernetesDefaults sets default values on the provided client config for accessing the diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go index 491f8bbd1e3..9e0c26110f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go @@ -48,7 +48,8 @@ import ( var ( // longThrottleLatency defines threshold for logging requests. All requests being - // throttle for more than longThrottleLatency will be logged. + // throttled (via the provided rateLimiter) for more than longThrottleLatency will + // be logged. longThrottleLatency = 50 * time.Millisecond ) @@ -74,19 +75,20 @@ func (r *RequestConstructionError) Error() string { return fmt.Sprintf("request construction error: '%v'", r.Err) } +var noBackoff = &NoBackoff{} + // Request allows for building up a request to a server in a chained fashion. 
// Any errors are stored until the end of your call, so you only have to // check once. type Request struct { - // required - client HTTPClient - verb string + c *RESTClient - baseURL *url.URL - content ContentConfig - serializers Serializers + rateLimiter flowcontrol.RateLimiter + backoff BackoffManager + timeout time.Duration // generic components accessible via method setters + verb string pathPrefix string subpath string params url.Values @@ -98,7 +100,6 @@ type Request struct { resource string resourceName string subresource string - timeout time.Duration // output err error @@ -106,42 +107,63 @@ type Request struct { // This is only used for per-request timeouts, deadlines, and cancellations. ctx context.Context - - backoffMgr BackoffManager - throttle flowcontrol.RateLimiter } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. -func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { +func NewRequest(c *RESTClient) *Request { + var backoff BackoffManager + if c.createBackoffMgr != nil { + backoff = c.createBackoffMgr() + } if backoff == nil { - klog.V(2).Infof("Not implementing request backoff strategy.") - backoff = &NoBackoff{} + backoff = noBackoff } - pathPrefix := "/" - if baseURL != nil { - pathPrefix = path.Join(pathPrefix, baseURL.Path) + var pathPrefix string + if c.base != nil { + pathPrefix = path.Join("/", c.base.Path, c.versionedAPIPath) + } else { + pathPrefix = path.Join("/", c.versionedAPIPath) } + + var timeout time.Duration + if c.Client != nil { + timeout = c.Client.Timeout + } + r := &Request{ - client: client, - verb: verb, - baseURL: baseURL, - pathPrefix: path.Join(pathPrefix, versionedAPIPath), - content: content, - serializers: serializers, - backoffMgr: backoff, - throttle: throttle, + c: c, + rateLimiter: c.rateLimiter, + backoff: backoff, timeout: timeout, + pathPrefix: pathPrefix, } + switch { - case len(content.AcceptContentTypes) > 0: - r.SetHeader("Accept", content.AcceptContentTypes) - case len(content.ContentType) > 0: - r.SetHeader("Accept", content.ContentType+", */*") + case len(c.content.AcceptContentTypes) > 0: + r.SetHeader("Accept", c.content.AcceptContentTypes) + case len(c.content.ContentType) > 0: + r.SetHeader("Accept", c.content.ContentType+", */*") } return r } +// NewRequestWithClient creates a Request with an embedded RESTClient for use in test scenarios. +func NewRequestWithClient(base *url.URL, versionedAPIPath string, content ClientContentConfig, client *http.Client) *Request { + return NewRequest(&RESTClient{ + base: base, + versionedAPIPath: versionedAPIPath, + content: content, + Client: client, + }) +} + +// Verb sets the verb this request will use. +func (r *Request) Verb(verb string) *Request { + r.verb = verb + return r +} + // Prefix adds segments to the relative beginning to the request path. These // items will be placed before the optional Namespace, Resource, or Name sections. 
// Setting AbsPath will clear any previously set Prefix segments @@ -184,17 +206,17 @@ func (r *Request) Resource(resource string) *Request { // or defaults to the stub implementation if nil is provided func (r *Request) BackOff(manager BackoffManager) *Request { if manager == nil { - r.backoffMgr = &NoBackoff{} + r.backoff = &NoBackoff{} return r } - r.backoffMgr = manager + r.backoff = manager return r } // Throttle receives a rate-limiter and sets or replaces an existing request limiter func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { - r.throttle = limiter + r.rateLimiter = limiter return r } @@ -272,8 +294,8 @@ func (r *Request) AbsPath(segments ...string) *Request { if r.err != nil { return r } - r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...)) - if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { + r.pathPrefix = path.Join(r.c.base.Path, path.Join(segments...)) + if len(segments) == 1 && (len(r.c.base.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { // preserve any trailing slashes for legacy behavior r.pathPrefix += "/" } @@ -317,7 +339,7 @@ func (r *Request) Param(paramName, s string) *Request { // VersionedParams will not write query parameters that have omitempty set and are empty. If a // parameter has already been set it is appended to (Params and VersionedParams are additive). func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { - return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion) + return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion) } func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { @@ -397,14 +419,19 @@ func (r *Request) Body(obj interface{}) *Request { if reflect.ValueOf(t).IsNil() { return r } - data, err := runtime.Encode(r.serializers.Encoder, t) + encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil) + if err != nil { + r.err = err + return r + } + data, err := runtime.Encode(encoder, t) if err != nil { r.err = err return r } glogBody("Request Body", data) r.body = bytes.NewReader(data) - r.SetHeader("Content-Type", r.content.ContentType) + r.SetHeader("Content-Type", r.c.content.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) } @@ -433,8 +460,8 @@ func (r *Request) URL() *url.URL { } finalURL := &url.URL{} - if r.baseURL != nil { - *finalURL = *r.baseURL + if r.c.base != nil { + *finalURL = *r.c.base } finalURL.Path = p @@ -468,8 +495,8 @@ func (r Request) finalURLTemplate() url.URL { segments := strings.Split(r.URL().Path, "/") groupIndex := 0 index := 0 - if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) { - groupIndex += len(strings.Split(r.baseURL.Path, "/")) + if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) { + groupIndex += len(strings.Split(r.c.base.Path, "/")) } if groupIndex >= len(segments) { return *url @@ -522,16 +549,16 @@ func (r Request) finalURLTemplate() url.URL { } func (r *Request) tryThrottle() error { - if r.throttle == nil { + if r.rateLimiter == nil { return nil } now := time.Now() var err error if r.ctx != nil { - err = r.throttle.Wait(r.ctx) + err = r.rateLimiter.Wait(r.ctx) } else { - r.throttle.Accept() + r.rateLimiter.Accept() } if latency := time.Since(now); latency > longThrottleLatency { @@ -544,27 +571,11 @@ 
func (r *Request) tryThrottle() error { // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. func (r *Request) Watch() (watch.Interface, error) { - return r.WatchWithSpecificDecoders( - func(body io.ReadCloser) streaming.Decoder { - framer := r.serializers.Framer.NewFrameReader(body) - return streaming.NewDecoder(framer, r.serializers.StreamingSerializer) - }, - r.serializers.Decoder, - ) -} - -// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder. -// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content -// Returns a watch.Interface, or an error. -func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) { // We specifically don't want to rate limit watches, so we - // don't use r.throttle here. + // don't use r.rateLimiter here. if r.err != nil { return nil, r.err } - if r.serializers.Framer == nil { - return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) - } url := r.URL().String() req, err := http.NewRequest(r.verb, url, r.body) @@ -575,18 +586,18 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) req = req.WithContext(r.ctx) } req.Header = r.headers - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) resp, err := client.Do(req) updateURLMetrics(r, resp, err) - if r.baseURL != nil { + if r.c.base != nil { if err != nil { - r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) + r.backoff.UpdateBackoff(r.c.base, err, 0) } else { - r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) + r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode) } } if err != nil { @@ -604,9 +615,22 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) } return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) } - wrapperDecoder := wrapperDecoderFn(resp.Body) + + contentType := resp.Header.Get("Content-Type") + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err) + } + objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params) + if err != nil { + return nil, err + } + + frameReader := framer.NewFrameReader(resp.Body) + watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer) + return watch.NewStreamWatcher( - restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder), + restclientwatch.NewDecoder(watchEventDecoder, objectDecoder), // use 500 to indicate that the cause of the error is unknown - other error codes // are more specific to HTTP interactions, and set a reason errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), @@ -617,8 +641,8 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) // It also handles corner cases for incomplete/invalid request data. func updateURLMetrics(req *Request, resp *http.Response, err error) { url := "none" - if req.baseURL != nil { - url = req.baseURL.Host + if req.c.base != nil { + url = req.c.base.Host } // Errors can be arbitrary strings. 
Unbound label cardinality is not suitable for a metric @@ -656,18 +680,18 @@ func (r *Request) Stream() (io.ReadCloser, error) { req = req.WithContext(r.ctx) } req.Header = r.headers - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) resp, err := client.Do(req) updateURLMetrics(r, resp, err) - if r.baseURL != nil { + if r.c.base != nil { if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + r.backoff.UpdateBackoff(r.URL(), err, 0) } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) } } if err != nil { @@ -691,6 +715,33 @@ func (r *Request) Stream() (io.ReadCloser, error) { } } +// requestPreflightCheck looks for common programmer errors on Request. +// +// We tackle here two programmer mistakes. The first one is to try to create +// something(POST) using an empty string as namespace with namespaceSet as +// true. If namespaceSet is true then namespace should also be defined. The +// second mistake is, when under the same circumstances, the programmer tries +// to GET, PUT or DELETE a named resource(resourceName != ""), again, if +// namespaceSet is true then namespace must not be empty. +func (r *Request) requestPreflightCheck() error { + if !r.namespaceSet { + return nil + } + if len(r.namespace) > 0 { + return nil + } + + switch r.verb { + case "POST": + return fmt.Errorf("an empty namespace may not be set during creation") + case "GET", "PUT", "DELETE": + if len(r.resourceName) > 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + } + return nil +} + // request connects to the server and invokes the provided function when a server response is // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. It will return an error if a problem occurred prior to connecting to the @@ -707,15 +758,11 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { return r.err } - // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) - if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set when a resource name is provided") - } - if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set during creation") + if err := r.requestPreflightCheck(); err != nil { + return err } - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } @@ -742,11 +789,11 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { } req.Header = r.headers - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) if retries > 0 { // We are retrying the request that we already send to apiserver // at least once before. - // This request should also be throttled with the client-internal throttler. + // This request should also be throttled with the client-internal rate limiter. 
if err := r.tryThrottle(); err != nil { return err } @@ -754,9 +801,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { resp, err := client.Do(req) updateURLMetrics(r, resp, err) if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + r.backoff.UpdateBackoff(r.URL(), err, 0) } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) } if err != nil { // "Connection reset by peer" is usually a transient error. @@ -799,7 +846,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { } klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) - r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + r.backoff.Sleep(time.Duration(seconds) * time.Second) return false } fn(req, resp) @@ -815,8 +862,6 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // processing. // // Error type: -// * If the request can't be constructed, or an error happened earlier while building its -// arguments: *RequestConstructionError // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. func (r *Request) Do() Result { @@ -887,14 +932,18 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu glogBody("Response Body", body) // verify the content type is accurate + var decoder runtime.Decoder contentType := resp.Header.Get("Content-Type") - decoder := r.serializers.Decoder - if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) { + if len(contentType) == 0 { + contentType = r.c.content.ContentType + } + if len(contentType) > 0 { + var err error mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { return Result{err: errors.NewInternalError(err)} } - decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + decoder, err = r.c.content.Negotiator.Decoder(mediaType, params) if err != nil { // if we fail to negotiate a decoder, treat this as an unstructured error switch { @@ -1014,7 +1063,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, } var groupResource schema.GroupResource if len(r.resource) > 0 { - groupResource.Group = r.content.GroupVersion.Group + groupResource.Group = r.c.content.GroupVersion.Group groupResource.Resource = r.resource } return errors.NewGenericServerResponse( diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go index af062b3c635..f271c825929 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go @@ -22,23 +22,9 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" scheme "k8s.io/client-go/scale/scheme" ) -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.Selector = nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go index a684f2d535b..f11fcbd0094 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) + localSchemeBuilder.Register() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go index 16d53c697fc..02a36051e6a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go @@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go index f07de6bdae0..35d15c30d4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go @@ -22,23 +22,9 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" scheme "k8s.io/client-go/scale/scheme" ) -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus, - Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.Selector = nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go index 88de0893293..5e8a5d20067 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) + localSchemeBuilder.Register() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go index 1901be8cb1f..c31ad4b3539 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go @@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go index a775bcc225b..36ef82b92d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go @@ -22,24 +22,9 @@ import ( v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" scheme "k8s.io/client-go/scale/scheme" ) -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1_ScaleStatus, - Convert_v1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} - func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.Selector = "" diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go index b15701c4ff7..4339376c938 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) + localSchemeBuilder.Register() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go index 203f8b32367..9bc48695dde 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go @@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go index 1b6b9e61037..821eb33d7df 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go @@ -22,23 +22,9 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" scheme "k8s.io/client-go/scale/scheme" ) -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.Selector = nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go index aed1174e023..248a007127d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) + localSchemeBuilder.Register() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go index 99aef8653b2..5fd69a5a88d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go @@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD index fc8998248b6..44706ac0d93 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD @@ -40,6 +40,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go b/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go index 98f82326730..54f600ad3f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go +++ 
b/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go @@ -248,7 +248,7 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list, nil } - matchingObjs, err := filterByNamespaceAndName(objs, ns, "") + matchingObjs, err := filterByNamespace(objs, ns) if err != nil { return nil, err } @@ -282,9 +282,19 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime return nil, errNotFound } - matchingObjs, err := filterByNamespaceAndName(objs, ns, name) - if err != nil { - return nil, err + var matchingObjs []runtime.Object + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if acc.GetNamespace() != ns { + continue + } + if acc.GetName() != name { + continue + } + matchingObjs = append(matchingObjs, obj) } if len(matchingObjs) == 0 { return nil, errNotFound @@ -472,10 +482,10 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return errors.NewNotFound(gvr.GroupResource(), name) } -// filterByNamespaceAndName returns all objects in the collection that -// match provided namespace and name. Empty namespace matches +// filterByNamespace returns all objects in the collection that +// match provided namespace. Empty namespace matches // non-namespaced objects. -func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime.Object, error) { +func filterByNamespace(objs []runtime.Object, ns string) ([]runtime.Object, error) { var res []runtime.Object for _, obj := range objs { @@ -486,9 +496,6 @@ func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime if ns != "" && acc.GetNamespace() != ns { continue } - if name != "" && acc.GetName() != name { - continue - } res = append(res, obj) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD index 3824d2a8174..5ce299323f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD @@ -21,15 +21,19 @@ go_test( "reflector_test.go", "shared_informer_test.go", "store_test.go", + "thread_safe_store_test.go", "undelta_store_test.go", ], embed = [":go_default_library"], race = "off", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -68,6 +72,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS index a8cd4b432bf..bd61bc76660 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -33,8 +33,6 @@ reviewers: - madhusudancs - hongchaodeng - krousey -- markturansky -- fgrzadkowski - xiang90 - mml - ingvagabund @@ -42,10 +40,6 @@ reviewers: - jessfraz - david-mcmahon - mfojtik -- '249043822' -- lixiaobing10051267 -- ddysher - mqliang -- feihujiang - sdminonne - ncdc diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index db4519f2e5d..55ecdcdf77e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta { return b } -// willObjectBeDeletedLocked returns true only if the last delta for the -// given object is Delete. Caller must lock first. -func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { - deltas := f.items[id] - return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted -} - // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { @@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err return KeyError{obj, err} } - // If object is supposed to be deleted (last event is Deleted), - // then we should ignore Sync events, because it would result in - // recreation of this object. - if actionType == Sync && f.willObjectBeDeletedLocked(id) { - return nil - } - newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -539,13 +525,6 @@ func (f *DeltaFIFO) Resync() error { return nil } -func (f *DeltaFIFO) syncKey(key string) error { - f.lock.Lock() - defer f.lock.Unlock() - - return f.syncKeyLocked(key) -} - func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/index.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/index.go index 9fa40683863..bbfb3b55f69 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/index.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/index.go @@ -56,7 +56,7 @@ type Indexer interface { type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns -// unique values for every object. This is conversion can create errors when more than one key is found. You +// unique values for every object. This conversion can create errors when more than one key is found. You // should prefer to make proper key and index functions. 
func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { return func(obj interface{}) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go index 8abde7131a3..1165c523eb6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -29,7 +29,9 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -41,15 +43,22 @@ import ( "k8s.io/utils/trace" ) +const defaultExpectedTypeName = "" + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // metrics tracks basic metric information about the reflector - metrics *reflectorMetrics + // The name of the type we expect to place in the store. The name + // will be the stringification of expectedGVK if provided, and the + // stringification of expectedType otherwise. It is for display + // only, and should not be used for parsing or comparison. + expectedTypeName string // The type of object we expect to place in the store. expectedType reflect.Type + // The GVK of the object we expect to place in the store if unstructured. + expectedGVK *schema.GroupVersionKind // The destination to sync up with the watch source store Store // listerWatcher is used to perform lists and watches. @@ -65,6 +74,9 @@ type Reflector struct { // observed when doing a sync with the underlying store // it is thread safe, but not synchronized with the underlying store lastSyncResourceVersion string + // isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion + // failed with an HTTP 410 (Gone) status code. + isLastSyncResourceVersionGone bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex // WatchListPageSize is the requested chunk size of initial and resync watch lists. @@ -102,14 +114,35 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, name: name, listerWatcher: lw, store: store, - expectedType: reflect.TypeOf(expectedType), period: time.Second, resyncPeriod: resyncPeriod, clock: &clock.RealClock{}, } + r.setExpectedType(expectedType) return r } +func (r *Reflector) setExpectedType(expectedType interface{}) { + r.expectedType = reflect.TypeOf(expectedType) + if r.expectedType == nil { + r.expectedTypeName = defaultExpectedTypeName + return + } + + r.expectedTypeName = r.expectedType.String() + + if obj, ok := expectedType.(*unstructured.Unstructured); ok { + // Use gvk to check that watch event objects are of the desired type. + gvk := obj.GroupVersionKind() + if gvk.Empty() { + klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) + return + } + r.expectedGVK = &gvk + r.expectedTypeName = gvk.String() + } +} + // internalPackages are packages that ignored when creating a default reflector name. 
These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} @@ -117,7 +150,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -129,9 +162,6 @@ var ( // nothing will ever be sent down this channel neverExitWatch <-chan time.Time = make(chan time.Time) - // Used to indicate that watching stopped so that a resync could happen. - errorResyncRequested = errors.New("resync channel fired") - // Used to indicate that watching stopped because of a signal from the stop // channel passed in from a client of the reflector. errorStopRequested = errors.New("Stop requested") @@ -155,13 +185,10 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) var resourceVersion string - // Explicitly set "0" as resource version - it's fine for the List() - // to be served from cache and potentially be delayed relative to - // etcd contents. Reflector framework will catch up via Watch() eventually. - options := metav1.ListOptions{ResourceVersion: "0"} + options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} if err := func() error { initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) @@ -184,8 +211,17 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if r.WatchListPageSize != 0 { pager.PageSize = r.WatchListPageSize } - // Pager falls back to full list if paginated list calls fail due to an "Expired" error. + list, err = pager.List(context.Background(), options) + if isExpiredError(err) { + r.setIsLastSyncResourceVersionExpired(true) + // Retry immediately if the resource version used to list is expired. + // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on + // continuation pages, but the pager might not be enabled, or the full list might fail because the + // resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all + // to recover and ensure the reflector makes forward progress. 
+ list, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + } close(listCh) }() select { @@ -196,8 +232,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { case <-listCh: } if err != nil { - return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err) } + r.setIsLastSyncResourceVersionExpired(false) // list was successful initTrace.Step("Objects listed") listMetaInterface, err := meta.ListAccessor(list) if err != nil { @@ -271,13 +308,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { w, err := r.listerWatcher.Watch(options) if err != nil { - switch err { - case io.EOF: + switch { + case isExpiredError(err): + r.setIsLastSyncResourceVersionExpired(true) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + case err == io.EOF: // watch closed normally - case io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + case err == io.ErrUnexpectedEOF: + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) } // If this is "connection refused" error, it means that most likely apiserver is not responsive. // It doesn't make sense to re-list all objects because most likely we will be able to restart @@ -293,10 +333,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { switch { - case apierrs.IsResourceExpired(err): - klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + case isExpiredError(err): + r.setIsLastSyncResourceVersionExpired(true) + klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) } } return nil @@ -336,9 +377,17 @@ loop: if event.Type == watch.Error { return apierrs.FromObject(event.Object) } - if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) - continue + if r.expectedType != nil { + if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + continue + } + } + if r.expectedGVK != nil { + if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a)) + continue + } } meta, err := meta.Accessor(event.Object) if err != nil { @@ -380,7 +429,7 @@ loop: if watchDuration < 1*time.Second && eventCount == 0 { return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, 
r.expectedTypeName, eventCount) return nil } @@ -397,3 +446,42 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v } + +// relistResourceVersion determines the resource version the reflector should list or relist from. +// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource +// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted +// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in +// etcd via a quorum read. +func (r *Reflector) relistResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + + if r.isLastSyncResourceVersionGone { + // Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache + // if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector + // to the latest available ResourceVersion, using a consistent read from etcd. + return "" + } + if r.lastSyncResourceVersion == "" { + // For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to + // be served from the watch cache if it is enabled. + return "0" + } + return r.lastSyncResourceVersion +} + +// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a +// expired error: HTTP 410 (Gone) Status Code. +func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.isLastSyncResourceVersionGone = isExpired +} + +func isExpiredError(err error) bool { + // In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and + // apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone + // check when we fully drop support for Kubernetes 1.17 servers from reflectors. + return apierrs.IsResourceExpired(err) || apierrs.IsGone(err) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go index dd849c8fa1b..5c00115f59b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -47,19 +47,6 @@ func (noopMetric) Dec() {} func (noopMetric) Observe(float64) {} func (noopMetric) Set(float64) {} -type reflectorMetrics struct { - numberOfLists CounterMetric - listDuration SummaryMetric - numberOfItemsInList SummaryMetric - - numberOfWatches CounterMetric - numberOfShortWatches CounterMetric - watchDuration SummaryMetric - numberOfItemsInWatch SummaryMetric - - lastResourceVersion GaugeMetric -} - // MetricsProvider generates various metrics used by the reflector. 
type MetricsProvider interface { NewListsMetric(name string) CounterMetric diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go index c37423b6652..f59a0852fe0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -209,7 +209,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS // if the controller should shutdown // callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, + err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 33e6239a69a..e72325147b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -292,6 +292,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { set := index[indexValue] if set != nil { set.Delete(key) + + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, indexValue) + } } } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 990a440c66d..1f1209f8d47 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -31,10 +31,12 @@ import ( type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional Kind string `json:"kind,omitempty"` // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions @@ -64,6 +66,7 @@ type Preferences struct { // Cluster contains information about how to communicate with a kubernetes cluster type Cluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` @@ -84,6 +87,7 @@ type Cluster struct { // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // ClientCertificate is the path to a client cert file for TLS. 
// +optional @@ -132,6 +136,7 @@ type AuthInfo struct { // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD index 3ccb7f6f1ee..2685a08ecbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "register.go", "types.go", + "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/clientcmd/api/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go index 2d7142e6e12..0d27672e3d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -25,220 +25,138 @@ import ( "k8s.io/client-go/tools/clientcmd/api" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddConversionFuncs( - func(in *Cluster, out *api.Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Cluster, out *Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Preferences, out *api.Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Preferences, out *Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Context, out *api.Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Context, out *Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - - func(in *Config, out *api.Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make(map[string]*api.Cluster) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make(map[string]*api.AuthInfo) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make(map[string]*api.Context) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make(map[string]runtime.Object) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in 
*api.Config, out *Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make([]NamedCluster, 0, 0) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make([]NamedAuthInfo, 0, 0) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make([]NamedContext, 0, 0) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make([]NamedExtension, 0, 0) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { - for _, curr := range *in { - newCluster := api.NewCluster() - if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { - return err - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newCluster - } else { - return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } - - return nil - }, - func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newCluster := (*in)[key] - oldCluster := &Cluster{} - if err := s.Convert(newCluster, oldCluster, 0); err != nil { - return err - } - - namedCluster := NamedCluster{key, *oldCluster} - *out = append(*out, namedCluster) - } +func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newCluster + } else { + return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + return nil +} - return nil - }, - func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { - for _, curr := range *in { - newAuthInfo := api.NewAuthInfo() - if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { - return err - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newAuthInfo - } else { - return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } +func Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := Cluster{} + if err := Convert_api_Cluster_To_v1_Cluster(newCluster, &oldCluster, s); err != nil { + return err + } + namedCluster := NamedCluster{key, oldCluster} + *out = append(*out, namedCluster) + } + return nil +} - return nil - }, - func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - 
newAuthInfo := (*in)[key] - oldAuthInfo := &AuthInfo{} - if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { - return err - } - - namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} - *out = append(*out, namedAuthInfo) - } +func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newAuthInfo + } else { + return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + return nil +} - return nil - }, - func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { - for _, curr := range *in { - newContext := api.NewContext() - if err := s.Convert(&curr.Context, newContext, 0); err != nil { - return err - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newContext - } else { - return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } +func Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := AuthInfo{} + if err := Convert_api_AuthInfo_To_v1_AuthInfo(newAuthInfo, &oldAuthInfo, s); err != nil { + return err + } + namedAuthInfo := NamedAuthInfo{key, oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + return nil +} - return nil - }, - func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newContext := (*in)[key] - oldContext := &Context{} - if err := s.Convert(newContext, oldContext, 0); err != nil { - return err - } - - namedContext := NamedContext{key, *oldContext} - *out = append(*out, namedContext) - } +func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newContext + } else { + return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + return nil +} - return nil - }, - func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { - for _, curr := range *in { - var newExtension runtime.Object - if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { - return err - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newExtension - } else { - return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } +func Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := 
make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newContext := (*in)[key] + oldContext := Context{} + if err := Convert_api_Context_To_v1_Context(newContext, &oldContext, s); err != nil { + return err + } + namedContext := NamedContext{key, oldContext} + *out = append(*out, namedContext) + } + return nil +} - return nil - }, - func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newExtension := (*in)[key] - oldExtension := &runtime.RawExtension{} - if err := s.Convert(newExtension, oldExtension, 0); err != nil { - return err - } - - namedExtension := NamedExtension{key, *oldExtension} - *out = append(*out, namedExtension) - } +func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newExtension + } else { + return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + return nil +} +func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := runtime.RawExtension{} + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { return nil - }, - ) + } + namedExtension := NamedExtension{key, oldExtension} + *out = append(*out, namedExtension) + } + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index cbf29ccf24d..ba5572ab07d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api // +k8s:deepcopy-gen=package package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go index 7b91d50908e..24f6284c574 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -37,7 +37,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addKnownTypes, addConversionFuncs) + localSchemeBuilder.Register(addKnownTypes) } func addKnownTypes(scheme *runtime.Scheme) error { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 56afb608a8c..2159ffc79d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -28,10 +28,12 @@ import ( type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional Kind string `json:"kind,omitempty"` // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..31e00ea6e4f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -0,0 +1,424 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/tools/clientcmd/api" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AuthInfo)(nil), (*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthInfo_To_api_AuthInfo(a.(*AuthInfo), b.(*api.AuthInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.AuthInfo)(nil), (*AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_AuthInfo_To_v1_AuthInfo(a.(*api.AuthInfo), b.(*AuthInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthProviderConfig)(nil), (*api.AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(a.(*AuthProviderConfig), b.(*api.AuthProviderConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.AuthProviderConfig)(nil), (*AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(a.(*api.AuthProviderConfig), b.(*AuthProviderConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Cluster_To_api_Cluster(a.(*Cluster), b.(*api.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_Cluster_To_v1_Cluster(a.(*api.Cluster), b.(*Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Config)(nil), (*api.Config)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Config_To_api_Config(a.(*Config), b.(*api.Config), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.Config)(nil), (*Config)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_Config_To_v1_Config(a.(*api.Config), b.(*Config), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Context)(nil), (*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Context_To_api_Context(a.(*Context), b.(*api.Context), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.Context)(nil), (*Context)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_Context_To_v1_Context(a.(*api.Context), b.(*Context), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecConfig)(nil), (*api.ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecConfig_To_api_ExecConfig(a.(*ExecConfig), b.(*api.ExecConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.ExecConfig)(nil), (*ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_ExecConfig_To_v1_ExecConfig(a.(*api.ExecConfig), b.(*ExecConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecEnvVar)(nil), (*api.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecEnvVar_To_api_ExecEnvVar(a.(*ExecEnvVar), b.(*api.ExecEnvVar), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*api.ExecEnvVar)(nil), (*ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_ExecEnvVar_To_v1_ExecEnvVar(a.(*api.ExecEnvVar), b.(*ExecEnvVar), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Preferences)(nil), (*api.Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Preferences_To_api_Preferences(a.(*Preferences), b.(*api.Preferences), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.Preferences)(nil), (*Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_Preferences_To_v1_Preferences(a.(*api.Preferences), b.(*Preferences), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]*api.AuthInfo)(nil), (*[]NamedAuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(a.(*map[string]*api.AuthInfo), b.(*[]NamedAuthInfo), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]*api.Cluster)(nil), (*[]NamedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(a.(*map[string]*api.Cluster), b.(*[]NamedCluster), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]*api.Context)(nil), (*[]NamedContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(a.(*map[string]*api.Context), b.(*[]NamedContext), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]runtime.Object)(nil), (*[]NamedExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(a.(*map[string]runtime.Object), b.(*[]NamedExtension), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]NamedAuthInfo)(nil), (*map[string]*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(a.(*[]NamedAuthInfo), b.(*map[string]*api.AuthInfo), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]NamedCluster)(nil), (*map[string]*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(a.(*[]NamedCluster), b.(*map[string]*api.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]NamedContext)(nil), (*map[string]*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(a.(*[]NamedContext), b.(*map[string]*api.Context), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]NamedExtension)(nil), (*map[string]runtime.Object)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(a.(*[]NamedExtension), b.(*map[string]runtime.Object), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + out.ClientCertificate = in.ClientCertificate + out.ClientCertificateData = 
*(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) + out.ClientKey = in.ClientKey + out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) + out.Token = in.Token + out.TokenFile = in.TokenFile + out.Impersonate = in.Impersonate + out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) + out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) + out.Username = in.Username + out.Password = in.Password + out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) + out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec)) + if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_v1_AuthInfo_To_api_AuthInfo is an autogenerated conversion function. +func Convert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return autoConvert_v1_AuthInfo_To_api_AuthInfo(in, out, s) +} + +func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + // INFO: in.LocationOfOrigin opted out of conversion generation + out.ClientCertificate = in.ClientCertificate + out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) + out.ClientKey = in.ClientKey + out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) + out.Token = in.Token + out.TokenFile = in.TokenFile + out.Impersonate = in.Impersonate + out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) + out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) + out.Username = in.Username + out.Password = in.Password + out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) + out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec)) + if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_api_AuthInfo_To_v1_AuthInfo is an autogenerated conversion function. +func Convert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return autoConvert_api_AuthInfo_To_v1_AuthInfo(in, out, s) +} + +func autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { + out.Name = in.Name + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + return nil +} + +// Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig is an autogenerated conversion function. +func Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { + return autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in, out, s) +} + +func autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { + out.Name = in.Name + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + return nil +} + +// Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig is an autogenerated conversion function. 
+func Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { + return autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in, out, s) +} + +func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { + out.Server = in.Server + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthority = in.CertificateAuthority + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Cluster_To_api_Cluster is an autogenerated conversion function. +func Convert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return autoConvert_v1_Cluster_To_api_Cluster(in, out, s) +} + +func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { + // INFO: in.LocationOfOrigin opted out of conversion generation + out.Server = in.Server + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthority = in.CertificateAuthority + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_api_Cluster_To_v1_Cluster is an autogenerated conversion function. +func Convert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_api_Cluster_To_v1_Cluster(in, out, s) +} + +func autoConvert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { + // INFO: in.Kind opted out of conversion generation + // INFO: in.APIVersion opted out of conversion generation + if err := Convert_v1_Preferences_To_api_Preferences(&in.Preferences, &out.Preferences, s); err != nil { + return err + } + if err := Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(&in.Clusters, &out.Clusters, s); err != nil { + return err + } + if err := Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { + return err + } + if err := Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(&in.Contexts, &out.Contexts, s); err != nil { + return err + } + out.CurrentContext = in.CurrentContext + if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Config_To_api_Config is an autogenerated conversion function. 
+func Convert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { + return autoConvert_v1_Config_To_api_Config(in, out, s) +} + +func autoConvert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { + // INFO: in.Kind opted out of conversion generation + // INFO: in.APIVersion opted out of conversion generation + if err := Convert_api_Preferences_To_v1_Preferences(&in.Preferences, &out.Preferences, s); err != nil { + return err + } + if err := Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(&in.Clusters, &out.Clusters, s); err != nil { + return err + } + if err := Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { + return err + } + if err := Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(&in.Contexts, &out.Contexts, s); err != nil { + return err + } + out.CurrentContext = in.CurrentContext + if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_api_Config_To_v1_Config is an autogenerated conversion function. +func Convert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { + return autoConvert_api_Config_To_v1_Config(in, out, s) +} + +func autoConvert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { + out.Cluster = in.Cluster + out.AuthInfo = in.AuthInfo + out.Namespace = in.Namespace + if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Context_To_api_Context is an autogenerated conversion function. +func Convert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { + return autoConvert_v1_Context_To_api_Context(in, out, s) +} + +func autoConvert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { + // INFO: in.LocationOfOrigin opted out of conversion generation + out.Cluster = in.Cluster + out.AuthInfo = in.AuthInfo + out.Namespace = in.Namespace + if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_api_Context_To_v1_Context is an autogenerated conversion function. +func Convert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { + return autoConvert_api_Context_To_v1_Context(in, out, s) +} + +func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { + out.Command = in.Command + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) + out.APIVersion = in.APIVersion + return nil +} + +// Convert_v1_ExecConfig_To_api_ExecConfig is an autogenerated conversion function. 
+func Convert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { + return autoConvert_v1_ExecConfig_To_api_ExecConfig(in, out, s) +} + +func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { + out.Command = in.Command + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) + out.APIVersion = in.APIVersion + return nil +} + +// Convert_api_ExecConfig_To_v1_ExecConfig is an autogenerated conversion function. +func Convert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { + return autoConvert_api_ExecConfig_To_v1_ExecConfig(in, out, s) +} + +func autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_ExecEnvVar_To_api_ExecEnvVar is an autogenerated conversion function. +func Convert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { + return autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in, out, s) +} + +func autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_api_ExecEnvVar_To_v1_ExecEnvVar is an autogenerated conversion function. +func Convert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { + return autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in, out, s) +} + +func autoConvert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { + out.Colors = in.Colors + if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Preferences_To_api_Preferences is an autogenerated conversion function. +func Convert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return autoConvert_v1_Preferences_To_api_Preferences(in, out, s) +} + +func autoConvert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { + out.Colors = in.Colors + if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { + return err + } + return nil +} + +// Convert_api_Preferences_To_v1_Preferences is an autogenerated conversion function. 
+func Convert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return autoConvert_api_Preferences_To_v1_Preferences(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 9c6ac3b5d2d..44115130d99 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -449,13 +449,15 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) } mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo) - // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data - // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" + // * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data + // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set". + // * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence. caLen := len(config.overrides.ClusterInfo.CertificateAuthority) caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) - if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { - mergedClusterInfo.CertificateAuthority = "" - mergedClusterInfo.CertificateAuthorityData = nil + if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 { + mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify + mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority + mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData } return *mergedClusterInfo, nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/BUILD index a265bdc6ed0..d8a3e39c3f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "doc.go", "event_broadcaster.go", "event_recorder.go", "fake.go", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/doc.go new file mode 100644 index 00000000000..795582b0280 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package events has all client logic for recording and reporting +// "k8s.io/api/events/v1beta1".Event events. +package events // import "k8s.io/client-go/tools/events" diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go index 1459922b373..9d5afd9b43e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -53,7 +53,6 @@ type eventKey struct { action string reason string reportingController string - reportingInstance string regarding corev1.ObjectReference related corev1.ObjectReference } @@ -103,6 +102,10 @@ func newBroadcaster(sink EventSink, sleepDuration time.Duration, eventCache map[ } } +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + // refreshExistingEventSeries refresh events TTL func (e *eventBroadcasterImpl) refreshExistingEventSeries() { // TODO: Investigate whether lock contention won't be a problem @@ -264,7 +267,6 @@ func getKey(event *v1beta1.Event) eventKey { action: event.Action, reason: event.Reason, reportingController: event.ReportingController, - reportingInstance: event.ReportingInstance, regarding: event.Regarding, } if event.Related != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/interfaces.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/interfaces.go index 56c678aa870..4e39156bd2d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -33,6 +33,9 @@ type EventRecorder interface { // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used // to automate handling of events, so imagine people writing switch statements to handle them. // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). // 'note' is intended to be human readable. Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) } @@ -51,6 +54,9 @@ type EventBroadcaster interface { // NOTE: events received on your eventHandler should be copied before being used. // TODO: figure out if this can be removed. StartEventWatcher(eventHandler func(event runtime.Object)) func() + + // Shutdown shuts down the broadcaster + Shutdown() } // EventSink knows how to store events (client-go implements it.) 
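The events interface changes above add an explicit Shutdown() method to EventBroadcaster and document the richer Eventf signature, which takes both a regarding and a related object plus an action string. Below is a minimal sketch of the resulting call shapes, assuming a broadcaster and recorder have already been constructed elsewhere; the emitScaleUpEvent helper and the reason/action/note values are hypothetical and not taken from the patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/events"
)

// emitScaleUpEvent records one event against node and then shuts the
// broadcaster down via the Shutdown() method introduced in this bump.
func emitScaleUpEvent(broadcaster events.EventBroadcaster, recorder events.EventRecorder, node runtime.Object) {
	defer broadcaster.Shutdown()

	// Per the interface docs, 'reason' and 'action' are UpperCamelCase and
	// 'note' is the human-readable message. nil is passed for 'related'
	// because this sketch has no secondary object to attach.
	recorder.Eventf(node, nil, corev1.EventTypeNormal, "ScaleUp", "ScaleUp", "node group resized to %d", 3)
}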
diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/OWNERS index 03ec598b326..44d93f84fa3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -12,4 +12,3 @@ reviewers: - timothysc - ingvagabund - resouer -- goltermann diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 4be650c0c82..42fffd45ae7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -53,9 +53,9 @@ limitations under the License. package leaderelection import ( + "bytes" "context" "fmt" - "reflect" "time" "k8s.io/apimachinery/pkg/api/errors" @@ -176,8 +176,9 @@ type LeaderCallbacks struct { type LeaderElector struct { config LeaderElectionConfig // internal bookkeeping - observedRecord rl.LeaderElectionRecord - observedTime time.Time + observedRecord rl.LeaderElectionRecord + observedRawRecord []byte + observedTime time.Time // used to implement OnNewLeader(), may lag slightly from the // value observedRecord.HolderIdentity if the transition has // not yet been reported. @@ -324,7 +325,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { } // 1. obtain or create the ElectionRecord - oldLeaderElectionRecord, err := le.config.Lock.Get() + oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get() if err != nil { if !errors.IsNotFound(err) { klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) @@ -340,8 +341,9 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { } // 2. 
Record obtained, check the Identity & Time - if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) { + if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { le.observedRecord = *oldLeaderElectionRecord + le.observedRawRecord = oldLeaderElectionRawRecord le.observedTime = le.clock.Now() } if len(oldLeaderElectionRecord.HolderIdentity) > 0 && @@ -365,6 +367,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { klog.Errorf("Failed to update lock: %v", err) return false } + le.observedRecord = leaderElectionRecord le.observedTime = le.clock.Now() return true diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD index b6e1f91a882..a6b0141b347 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD @@ -7,6 +7,7 @@ go_library( "endpointslock.go", "interface.go", "leaselock.go", + "multilock.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/leaderelection/resourcelock", importpath = "k8s.io/client-go/tools/leaderelection/resourcelock", @@ -14,6 +15,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/coordination/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 785356894f1..fd152b072cb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -41,22 +41,23 @@ type ConfigMapLock struct { } // Get returns the election record from a ConfigMap Annotation -func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) { +func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, err } if cml.cm.Annotations == nil { cml.cm.Annotations = make(map[string]string) } - if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found { + recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey] + if found { if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err + return nil, nil, err } } - return &record, nil + return &record, []byte(recordBytes), nil } // Create attempts to create a LeaderElectionRecord annotation @@ -106,7 +107,7 @@ func (cml *ConfigMapLock) Describe() string { return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name) } -// returns the Identity of the lock +// Identity returns the Identity of the lock func (cml *ConfigMapLock) Identity() string { return cml.LockConfig.Identity } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index bfe5e8b1bb3..f5a8ffcc867 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -36,22 +36,23 @@ type EndpointsLock struct { } // Get returns the election record from a Endpoints Annotation -func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) { +func (el *EndpointsLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, err } if el.e.Annotations == nil { el.e.Annotations = make(map[string]string) } - if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found { + recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey] + if found { if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err + return nil, nil, err } } - return &record, nil + return &record, []byte(recordBytes), nil } // Create attempts to create a LeaderElectionRecord annotation @@ -81,6 +82,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { if err != nil { return err } + if el.e.Annotations == nil { + el.e.Annotations = make(map[string]string) + } el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e) return err @@ -101,7 +105,7 @@ func (el *EndpointsLock) Describe() string { return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name) } -// returns the Identity of the lock +// Identity returns the Identity of the lock func (el *EndpointsLock) Identity() string { return el.LockConfig.Identity } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 050d41a25fa..c9f1759144c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -30,6 +30,8 @@ const ( EndpointsResourceLock = "endpoints" ConfigMapsResourceLock = "configmaps" LeasesResourceLock = "leases" + EndpointsLeasesResourceLock = "endpointsleases" + ConfigMapsLeasesResourceLock = "configmapsleases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. @@ -71,7 +73,7 @@ type ResourceLockConfig struct { // by the leaderelection code. 
type Interface interface { // Get returns the LeaderElectionRecord - Get() (*LeaderElectionRecord, error) + Get() (*LeaderElectionRecord, []byte, error) // Create attempts to create a LeaderElectionRecord Create(ler LeaderElectionRecord) error @@ -92,33 +94,46 @@ type Interface interface { // Manufacture will create a lock of a given type according to the input parameters func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) { + endpointsLock := &EndpointsLock{ + EndpointsMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coreClient, + LockConfig: rlc, + } + configmapLock := &ConfigMapLock{ + ConfigMapMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coreClient, + LockConfig: rlc, + } + leaseLock := &LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coordinationClient, + LockConfig: rlc, + } switch lockType { case EndpointsResourceLock: - return &EndpointsLock{ - EndpointsMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: coreClient, - LockConfig: rlc, - }, nil + return endpointsLock, nil case ConfigMapsResourceLock: - return &ConfigMapLock{ - ConfigMapMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: coreClient, - LockConfig: rlc, - }, nil + return configmapLock, nil case LeasesResourceLock: - return &LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: coordinationClient, - LockConfig: rlc, + return leaseLock, nil + case EndpointsLeasesResourceLock: + return &MultiLock{ + Primary: endpointsLock, + Secondary: leaseLock, + }, nil + case ConfigMapsLeasesResourceLock: + return &MultiLock{ + Primary: configmapLock, + Secondary: leaseLock, }, nil default: return nil, fmt.Errorf("Invalid lock-type %s", lockType) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 285f9440540..74016b8df15 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcelock import ( + "encoding/json" "errors" "fmt" @@ -36,13 +37,18 @@ type LeaseLock struct { } // Get returns the election record from a Lease spec -func (ll *LeaseLock) Get() (*LeaderElectionRecord, error) { +func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { var err error ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, err } - return LeaseSpecToLeaderElectionRecord(&ll.lease.Spec), nil + record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec) + recordByte, err := json.Marshal(*record) + if err != nil { + return nil, nil, err + } + return record, recordByte, nil } // Create attempts to create a Lease @@ -84,31 +90,30 @@ func (ll *LeaseLock) Describe() string { return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name) } -// returns the Identity of the lock +// Identity returns the Identity of the lock func (ll *LeaseLock) Identity() string { return ll.LockConfig.Identity } func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord { - holderIdentity := "" + var r LeaderElectionRecord if spec.HolderIdentity != nil { - holderIdentity = *spec.HolderIdentity + r.HolderIdentity = *spec.HolderIdentity } - leaseDurationSeconds := 0 if spec.LeaseDurationSeconds != nil { - leaseDurationSeconds = int(*spec.LeaseDurationSeconds) + r.LeaseDurationSeconds = int(*spec.LeaseDurationSeconds) } - leaseTransitions := 0 if spec.LeaseTransitions != nil { - leaseTransitions = int(*spec.LeaseTransitions) + r.LeaderTransitions = int(*spec.LeaseTransitions) } - return &LeaderElectionRecord{ - HolderIdentity: holderIdentity, - LeaseDurationSeconds: leaseDurationSeconds, - AcquireTime: metav1.Time{spec.AcquireTime.Time}, - RenewTime: metav1.Time{spec.RenewTime.Time}, - LeaderTransitions: leaseTransitions, + if spec.AcquireTime != nil { + r.AcquireTime = metav1.Time{spec.AcquireTime.Time} } + if spec.RenewTime != nil { + r.RenewTime = metav1.Time{spec.RenewTime.Time} + } + return &r + } func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go new file mode 100644 index 00000000000..8cb89dc4f05 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resourcelock + +import ( + "bytes" + "encoding/json" + + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +const ( + UnknownLeader = "leaderelection.k8s.io/unknown" +) + +// MultiLock is used for lock's migration +type MultiLock struct { + Primary Interface + Secondary Interface +} + +// Get returns the older election record of the lock +func (ml *MultiLock) Get() (*LeaderElectionRecord, []byte, error) { + primary, primaryRaw, err := ml.Primary.Get() + if err != nil { + return nil, nil, err + } + + secondary, secondaryRaw, err := ml.Secondary.Get() + if err != nil { + // Lock is held by old client + if apierrors.IsNotFound(err) && primary.HolderIdentity != ml.Identity() { + return primary, primaryRaw, nil + } + return nil, nil, err + } + + if primary.HolderIdentity != secondary.HolderIdentity { + primary.HolderIdentity = UnknownLeader + primaryRaw, err = json.Marshal(primary) + if err != nil { + return nil, nil, err + } + } + return primary, ConcatRawRecord(primaryRaw, secondaryRaw), nil +} + +// Create attempts to create both primary lock and secondary lock +func (ml *MultiLock) Create(ler LeaderElectionRecord) error { + err := ml.Primary.Create(ler) + if err != nil && !apierrors.IsAlreadyExists(err) { + return err + } + return ml.Secondary.Create(ler) +} + +// Update will update and existing annotation on both two resources. +func (ml *MultiLock) Update(ler LeaderElectionRecord) error { + err := ml.Primary.Update(ler) + if err != nil { + return err + } + _, _, err = ml.Secondary.Get() + if err != nil && apierrors.IsNotFound(err) { + return ml.Secondary.Create(ler) + } + return ml.Secondary.Update(ler) +} + +// RecordEvent in leader election while adding meta-data +func (ml *MultiLock) RecordEvent(s string) { + ml.Primary.RecordEvent(s) + ml.Secondary.RecordEvent(s) +} + +// Describe is used to convert details on current resource lock +// into a string +func (ml *MultiLock) Describe() string { + return ml.Primary.Describe() +} + +// Identity returns the Identity of the lock +func (ml *MultiLock) Identity() string { + return ml.Primary.Identity() +} + +func ConcatRawRecord(primaryRaw, secondaryRaw []byte) []byte { + return bytes.Join([][]byte{primaryRaw, secondaryRaw}, []byte(",")) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/OWNERS index f150be53699..ad52d0c5cc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -5,5 +5,3 @@ reviewers: - eparis - krousey - jayunit100 -- fgrzadkowski -- tmrts diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go index d265db78683..307808be1e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go @@ -77,6 +77,7 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti if options.Limit == 0 { options.Limit = p.PageSize } + requestedResourceVersion := options.ResourceVersion var list *metainternalversion.List for { select { @@ -87,12 +88,18 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti obj, err := p.PageFn(ctx, options) if err != nil { - if !errors.IsResourceExpired(err) || !p.FullListIfExpired { + // Only fallback to full list if an "Expired" errors is returned, FullListIfExpired is true, and + // the 
"Expired" error occurred in page 2 or later (since full list is intended to prevent a pager.List from + // failing when the resource versions is established by the first page request falls out of the compaction + // during the subsequent list requests). + if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" { return nil, err } - // the list expired while we were processing, fall back to a full list + // the list expired while we were processing, fall back to a full list at + // the requested ResourceVersion. options.Limit = 0 options.Continue = "" + options.ResourceVersion = requestedResourceVersion return p.PageFn(ctx, options) } m, err := meta.ListAccessor(obj) @@ -125,6 +132,10 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // set the next loop up options.Continue = m.GetContinue() + // Clear the ResourceVersion on the subsequent List calls to avoid the + // `specifying resource version is not allowed when using continue` error. + // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. + options.ResourceVersion = "" } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/doc.go index 657ddecbcdc..33d5fe78ee0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/doc.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package record has all client logic for recording and reporting events. +// Package record has all client logic for recording and reporting +// "k8s.io/api/core/v1".Event events. package record // import "k8s.io/client-go/tools/record" diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go index 7bd1ef5ed57..66f8bd634e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go @@ -127,6 +127,28 @@ type EventBroadcaster interface { // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder +// implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. +type EventRecorderAdapter struct { + recorder EventRecorder +} + +// NewEventRecorderAdapter returns an adapter implementing the new +// "k8s.io/client-go/tools/events".EventRecorder interface. +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { + return &EventRecorderAdapter{ + recorder: recorder, + } +} + +// Eventf is a wrapper around v1 Eventf +func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + a.recorder.Eventf(regarding, eventtype, reason, note, args...) } // Creates a new event broadcaster. @@ -161,14 +183,18 @@ type eventBroadcasterImpl struct { // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. // The return value can be ignored or used to stop recording, if desired. 
// TODO: make me an object with parameterizable queue length and retry interval -func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - eventCorrelator := NewEventCorrelatorWithOptions(eventBroadcaster.options) - return eventBroadcaster.StartEventWatcher( +func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + eventCorrelator := NewEventCorrelatorWithOptions(e.options) + return e.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, eventBroadcaster.sleepDuration) + recordToSink(sink, event, eventCorrelator, e.sleepDuration) }) } +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. @@ -249,8 +275,8 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv // StartLogging starts sending events received from this EventBroadcaster to the given logging function. // The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( +func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return e.StartEventWatcher( func(e *v1.Event) { logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) }) @@ -258,8 +284,8 @@ func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format stri // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. // The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() +func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := e.Watch() go func() { defer utilruntime.HandleCrash() for watchEvent := range watcher.ResultChan() { @@ -276,8 +302,8 @@ func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler fun } // NewRecorder returns an EventRecorder that records events with the given event source. 
-func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} } type recorderImpl struct { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD index 661c5e35104..514a2572fc2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD @@ -21,6 +21,7 @@ go_library( "csr.go", "io.go", "pem.go", + "server_inspection.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/util/cert", importpath = "k8s.io/client-go/util/cert", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/io.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/io.go index 5efb2489487..35fde68a498 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/io.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/io.go @@ -72,7 +72,22 @@ func WriteCert(certPath string, data []byte) error { // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { - certs, err := CertsFromFile(filename) + pemBlock, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + pool, err := NewPoolFromBytes(pemBlock) + if err != nil { + return nil, fmt.Errorf("error creating pool from %s: %s", filename, err) + } + return pool, nil +} + +// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) { + certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/pem.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/pem.go index 9185e2e22d8..c77512315ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/pem.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/pem.go @@ -17,6 +17,7 @@ limitations under the License. package cert import ( + "bytes" "crypto/x509" "encoding/pem" "errors" @@ -59,3 +60,14 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { } return certs, nil } + +// EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs. +func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return b.Bytes(), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/server_inspection.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/server_inspection.go new file mode 100644 index 00000000000..f1ef292dee1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/server_inspection.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/url" + "strings" +) + +// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the +// state of particular servers. apiHost is "host:port" +func GetClientCANames(apiHost string) ([]string, error) { + // when we run this the second time, we know which one we are expecting + acceptableCAs := []string{} + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate + GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) { + acceptableCAs = []string{} + for _, curr := range hello.AcceptableCAs { + acceptableCAs = append(acceptableCAs, string(curr)) + } + return &tls.Certificate{}, nil + }, + } + + conn, err := tls.Dial("tcp", apiHost, tlsConfig) + if err != nil { + return nil, err + } + if err := conn.Close(); err != nil { + return nil, err + } + + return acceptableCAs, nil +} + +// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs +func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) { + apiserverURL, err := url.Parse(kubeConfigURL) + if err != nil { + return nil, err + } + return GetClientCANames(apiserverURL.Host) +} + +// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes. +// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port" +func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, // this is insecure so that we always get connected + } + // if a name is specified for SNI, set it. 
+ if len(serverName) > 0 { + tlsConfig.ServerName = serverName + } + + conn, err := tls.Dial("tcp", apiHost, tlsConfig) + if err != nil { + return nil, nil, err + } + if err = conn.Close(); err != nil { + return nil, nil, fmt.Errorf("failed to close connection : %v", err) + } + + peerCerts := conn.ConnectionState().PeerCertificates + peerCertBytes := [][]byte{} + for _, a := range peerCerts { + actualCert, err := EncodeCertificates(a) + if err != nil { + return nil, nil, err + } + peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert)))) + } + + return peerCerts, peerCertBytes, err +} + +// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs +func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) { + apiserverURL, err := url.Parse(kubeConfigURL) + if err != nil { + return nil, nil, err + } + return GetServingCertificates(apiserverURL.Host, serverName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go index ef38791552d..55f5f5cbf6e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go @@ -113,8 +113,17 @@ type Config struct { // quickly replaced with a unique cert/key pair. BootstrapKeyPEM []byte // CertificateExpiration will record a metric that shows the remaining - // lifetime of the certificate. + // lifetime of the certificate. This metric is a gauge because only the + // current cert expiry time is really useful. Reading this metric at any + // time simply gives the next expiration date, no need to keep some + // history (histogram) of all previous expiry dates. CertificateExpiration Gauge + // CertificateRotation will record a metric showing the time in seconds + // that certificates lived before being rotated. This metric is a histogram + // because there is value in keeping a history of rotation cadences. It + // allows one to setup monitoring and alerting of unexpected rotation + // behavior and track trends in rotation frequency. + CertificateRotation Histogram } // Store is responsible for getting and updating the current certificate. @@ -139,6 +148,12 @@ type Gauge interface { Set(float64) } +// Histogram will record the time a rotated certificate was used before being +// rotated. +type Histogram interface { + Observe(float64) +} + // NoCertKeyError indicates there is no cert/key currently available. type NoCertKeyError string @@ -163,6 +178,7 @@ type manager struct { certStore Store certificateExpiration Gauge + certificateRotation Histogram // the following variables must only be accessed under certAccessLock certAccessLock sync.RWMutex @@ -174,6 +190,9 @@ type manager struct { clientFn CSRClientFunc stopCh chan struct{} stopped bool + + // Set to time.Now but can be stubbed out for testing + now func() time.Time } // NewManager returns a new certificate manager. 
A certificate manager is @@ -203,6 +222,8 @@ func NewManager(config *Config) (Manager, error) { cert: cert, forceRotation: forceRotation, certificateExpiration: config.CertificateExpiration, + certificateRotation: config.CertificateRotation, + now: time.Now, } return &m, nil @@ -215,7 +236,7 @@ func NewManager(config *Config) (Manager, error) { func (m *manager) Current() *tls.Certificate { m.certAccessLock.RLock() defer m.certAccessLock.RUnlock() - if m.cert != nil && m.cert.Leaf != nil && time.Now().After(m.cert.Leaf.NotAfter) { + if m.cert != nil && m.cert.Leaf != nil && m.now().After(m.cert.Leaf.NotAfter) { klog.V(2).Infof("Current certificate is expired.") return nil } @@ -256,7 +277,7 @@ func (m *manager) Start() { templateChanged := make(chan struct{}) go wait.Until(func() { deadline := m.nextRotationDeadline() - if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 { + if sleepInterval := deadline.Sub(m.now()); sleepInterval > 0 { klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) timer := time.NewTimer(sleepInterval) @@ -421,7 +442,10 @@ func (m *manager) rotateCerts() (bool, error) { return false, nil } - m.updateCached(cert) + if old := m.updateCached(cert); old != nil && m.certificateRotation != nil { + m.certificateRotation.Observe(m.now().Sub(old.Leaf.NotBefore).Seconds()) + } + return true, nil } @@ -490,14 +514,14 @@ func (m *manager) nextRotationDeadline() time.Time { // forceRotation is not protected by locks if m.forceRotation { m.forceRotation = false - return time.Now() + return m.now() } m.certAccessLock.RLock() defer m.certAccessLock.RUnlock() if !m.certSatisfiesTemplateLocked() { - return time.Now() + return m.now() } notAfter := m.cert.Leaf.NotAfter @@ -523,13 +547,15 @@ var jitteryDuration = func(totalDuration float64) time.Duration { return wait.Jitter(time.Duration(totalDuration), 0.2) - time.Duration(totalDuration*0.3) } -// updateCached sets the most recent retrieved cert. It also sets the server -// as assumed healthy. -func (m *manager) updateCached(cert *tls.Certificate) { +// updateCached sets the most recent retrieved cert and returns the old cert. +// It also sets the server as assumed healthy. +func (m *manager) updateCached(cert *tls.Certificate) *tls.Certificate { m.certAccessLock.Lock() defer m.certAccessLock.Unlock() m.serverHealth = true + old := m.cert m.cert = cert + return old } // updateServerError takes an error returned by the server and infers diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/retry/util.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/retry/util.go index c80ff0877fa..15e2722f304 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/retry/util.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/retry/util.go @@ -42,43 +42,64 @@ var DefaultBackoff = wait.Backoff{ Jitter: 0.1, } -// OnError executes the provided function repeatedly, retrying if the server returns a specified -// error. Callers should preserve previous executions if they wish to retry changes. It performs an -// exponential backoff. -// -// var pod *api.Pod -// err := retry.OnError(DefaultBackoff, errors.IsConflict, func() (err error) { -// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) -// return -// }) -// if err != nil { -// // may be conflict if max retries were hit -// return err -// } -// ... -// -// TODO: Make Backoff an interface? 
-func OnError(backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error { - var lastConflictErr error +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. +func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error { + var lastErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil - case errorFunc(err): - lastConflictErr = err + case retriable(err): + lastErr = err return false, nil default: return false, err } }) if err == wait.ErrWaitTimeout { - err = lastConflictErr + err = lastErr } return err } -// RetryOnConflict executes the function function repeatedly, retrying if the server returns a conflicting +// RetryOnConflict is used to make an update to a resource when you have to worry about +// conflicts caused by other code making unrelated updates to the resource at the same +// time. fn should fetch the resource to be modified, make appropriate changes to it, try +// to update it, and return (unmodified) the error from the update function. On a +// successful update, RetryOnConflict will return nil. If the update function returns a +// "Conflict" error, RetryOnConflict will wait some amount of time as described by +// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times +// and gives up, RetryOnConflict will return an error to the caller. +// +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. +// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err ! nil { +// return err +// } +// +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed +// +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... +// +// TODO: Make Backoff an interface? 
func RetryOnConflict(backoff wait.Backoff, fn func() error) error { return OnError(backoff, errors.IsConflict, fn) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 07327988625..e1ab76ea218 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -35,14 +35,17 @@ type DelayingInterface interface { // NewDelayingQueue constructs a new workqueue with delayed queuing ability func NewDelayingQueue() DelayingInterface { - return newDelayingQueue(clock.RealClock{}, "") + return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") } +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability func NewNamedDelayingQueue(name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, name) + return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) } -func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { +// NewDelayingQueueWithCustomClock constructs a new named workqueue +// with ability to inject real or fake clock for testing purposes +func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ Interface: NewNamed(name), clock: clock, @@ -178,6 +181,9 @@ func (q *delayingType) waitingLoop() { // Make a placeholder channel to use when there are no items in our list never := make(<-chan time.Time) + // Make a timer that expires when the item at the head of the waiting queue is ready + var nextReadyAtTimer clock.Timer + waitingForQueue := &waitForPriorityQueue{} heap.Init(waitingForQueue) @@ -205,8 +211,12 @@ func (q *delayingType) waitingLoop() { // Set up a wait for the first item's readyAt (if one exists) nextReadyAt := never if waitingForQueue.Len() > 0 { + if nextReadyAtTimer != nil { + nextReadyAtTimer.Stop() + } entry := waitingForQueue.Peek().(*waitFor) - nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) + nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) + nextReadyAt = nextReadyAtTimer.C() } select { diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS b/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS index 866b3cc8cf2..b3ed1ca6640 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS @@ -29,10 +29,6 @@ reviewers: - dims - krousey - rootfs -- jszczepkowski -- markturansky -- girishkalele -- jdef - freehan - jingxu97 - wlan0 diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod index bd30907726c..745757a09a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod @@ -8,18 +8,13 @@ require ( k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 - k8s.io/klog v0.4.0 - k8s.io/utils v0.0.0-20190801114015-581e00157fb1 + k8s.io/klog v1.0.0 + k8s.io/utils v0.0.0-20191114184206-e782cd3c129f ) replace ( - golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 - golang.org/x/lint => golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a - golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys => golang.org/x/sys 
v0.0.0-20190209173611-3b5209105503 - golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db - golang.org/x/time => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d + golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 + golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum index 118aef9c91e..8ccf452dc8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum @@ -38,11 +38,11 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -51,12 +51,13 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -65,8 +66,8 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -88,28 +89,34 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -117,24 +124,30 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -150,25 +163,27 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/BUILD b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/BUILD index 0624897d538..a1fc3a33de7 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/BUILD @@ -4,6 +4,8 @@ go_library( name = "go_default_library", srcs = [ "address.go", + "conditions.go", + "labels.go", 
"taints.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/node/helpers", @@ -14,10 +16,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go new file mode 100644 index 00000000000..d96e751520f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "encoding/json" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clientset "k8s.io/client-go/kubernetes" +) + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// SetNodeCondition updates specific node condition with patch operation. +func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { + condition.LastHeartbeatTime = metav1.NewTime(time.Now()) + patch, err := json.Marshal(map[string]interface{}{ + "status": map[string]interface{}{ + "conditions": []v1.NodeCondition{condition}, + }, + }) + if err != nil { + return err + } + _, err = c.CoreV1().Nodes().PatchStatus(string(node), patch) + return err +} diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go new file mode 100644 index 00000000000..80e77bb145c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "encoding/json" + "fmt" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + clientretry "k8s.io/client-go/util/retry" + "k8s.io/klog" +) + +var updateLabelBackoff = wait.Backoff{ + Steps: 5, + Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +// AddOrUpdateLabelsOnNode updates the labels on the node and returns true on +// success and false on failure. +func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[string]string, node *v1.Node) bool { + err := addOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate) + if err != nil { + utilruntime.HandleError( + fmt.Errorf( + "unable to update labels %+v for Node %q: %v", + labelsToUpdate, + node.Name, + err)) + return false + } + + klog.V(4).Infof("Updated labels %+v to Node %v", labelsToUpdate, node.Name) + return true +} + +func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, labelsToUpdate map[string]string) error { + firstTry := true + return clientretry.RetryOnConflict(updateLabelBackoff, func() error { + var err error + var node *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. + if firstTry { + node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + // Make a copy of the node and update the labels. 
+ newNode := node.DeepCopy() + if newNode.Labels == nil { + newNode.Labels = make(map[string]string) + } + for key, value := range labelsToUpdate { + newNode.Labels[key] = value + } + + oldData, err := json.Marshal(node) + if err != nil { + return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err) + } + newData, err := json.Marshal(newNode) + if err != nil { + return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create a two-way merge patch: %v", err) + } + if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + return fmt.Errorf("failed to patch the node: %v", err) + } + return nil + }) +} diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go index 524880d0d13..3f711d55037 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go @@ -115,3 +115,30 @@ func HasLBFinalizer(service *v1.Service) bool { } return false } + +// LoadBalancerStatusEqual checks if load balancer status are equal +func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/defaults.go index caeb869a37e..258e9db4570 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/defaults.go @@ -44,6 +44,7 @@ func RecommendedDefaultLeaderElectionConfiguration(obj *LeaderElectionConfigurat obj.RetryPeriod = metav1.Duration{Duration: 2 * time.Second} } if obj.ResourceLock == "" { + // TODO: Migrate to LeaseLock. obj.ResourceLock = EndpointsResourceLock } if obj.LeaderElect == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/types.go index a5202e3e9d2..c9d05525d43 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -59,10 +59,10 @@ type LeaderElectionConfiguration struct { // DebuggingConfiguration holds configuration for Debugging related features. type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ - EnableProfiling bool `json:"enableProfiling"` + EnableProfiling *bool `json:"enableProfiling,omitempty"` // enableContentionProfiling enables lock contention profiling, if // enableProfiling is true. 
- EnableContentionProfiling bool `json:"enableContentionProfiling"` + EnableContentionProfiling *bool `json:"enableContentionProfiling,omitempty"` } // ClientConnectionConfiguration contains details for constructing a client. diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go index c62ee734b89..4e4acf309ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go @@ -34,36 +34,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*ClientConnectionConfiguration)(nil), (*config.ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(a.(*ClientConnectionConfiguration), b.(*config.ClientConnectionConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ClientConnectionConfiguration)(nil), (*ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(a.(*config.ClientConnectionConfiguration), b.(*ClientConnectionConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*DebuggingConfiguration)(nil), (*config.DebuggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(a.(*DebuggingConfiguration), b.(*config.DebuggingConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DebuggingConfiguration)(nil), (*DebuggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(a.(*config.DebuggingConfiguration), b.(*DebuggingConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*LeaderElectionConfiguration)(nil), (*config.LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(a.(*LeaderElectionConfiguration), b.(*config.LeaderElectionConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.LeaderElectionConfiguration)(nil), (*LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(a.(*config.LeaderElectionConfiguration), b.(*LeaderElectionConfiguration), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*config.ClientConnectionConfiguration)(nil), (*ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(a.(*config.ClientConnectionConfiguration), b.(*ClientConnectionConfiguration), scope) }); err != nil { @@ -116,14 +86,22 @@ func 
autoConvert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnecti } func autoConvert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(in *DebuggingConfiguration, out *config.DebuggingConfiguration, s conversion.Scope) error { - out.EnableProfiling = in.EnableProfiling - out.EnableContentionProfiling = in.EnableContentionProfiling + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableContentionProfiling, &out.EnableContentionProfiling, s); err != nil { + return err + } return nil } func autoConvert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(in *config.DebuggingConfiguration, out *DebuggingConfiguration, s conversion.Scope) error { - out.EnableProfiling = in.EnableProfiling - out.EnableContentionProfiling = in.EnableContentionProfiling + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableContentionProfiling, &out.EnableContentionProfiling, s); err != nil { + return err + } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go index e747afa99b0..629bf65f9d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,16 @@ func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfigurati // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DebuggingConfiguration) DeepCopyInto(out *DebuggingConfiguration) { *out = *in + if in.EnableProfiling != nil { + in, out := &in.EnableProfiling, &out.EnableProfiling + *out = new(bool) + **out = **in + } + if in.EnableContentionProfiling != nil { + in, out := &in.EnableContentionProfiling, &out.EnableContentionProfiling + *out = new(bool) + **out = **in + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/BUILD new file mode 100644 index 00000000000..fe29ae9c5b9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/component-base/config/validation", + importpath = "k8s.io/component-base/config/validation", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/component-base/config:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/component-base/config:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/validation.go new file mode 100644 index 00000000000..4db15ea647d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/config/validation/validation.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/component-base/config" +) + +// ValidateClientConnectionConfiguration ensures validation of the ClientConnectionConfiguration struct +func ValidateClientConnectionConfiguration(cc *config.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if cc.Burst < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("burst"), cc.Burst, "must be non-negative")) + } + return allErrs +} + +// ValidateLeaderElectionConfiguration ensures validation of the LeaderElectionConfiguration struct +func ValidateLeaderElectionConfiguration(cc *config.LeaderElectionConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !cc.LeaderElect { + return allErrs + } + if cc.LeaseDuration.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("leaseDuration"), cc.LeaseDuration, "must be greater than zero")) + } + if cc.RenewDeadline.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("renewDeadline"), cc.RenewDeadline, "must be greater than zero")) + } + if cc.RetryPeriod.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("retryPeriod"), cc.RetryPeriod, "must be greater than zero")) + } + if cc.LeaseDuration.Duration < cc.RenewDeadline.Duration { + allErrs = append(allErrs, field.Invalid(fldPath.Child("leaseDuration"), cc.RenewDeadline, "LeaseDuration must be greater than RenewDeadline")) + } + if len(cc.ResourceLock) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceLock"), cc.ResourceLock, "resourceLock is required")) + } + if len(cc.ResourceNamespace) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceNamespace"), cc.ResourceNamespace, "resourceNamespace is required")) + } + if len(cc.ResourceName) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceName"), cc.ResourceName, "resourceName is required")) + } + return allErrs +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/BUILD index 0c742fe1ed9..27c2d5644fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/BUILD @@ -7,6 +7,7 @@ go_library( importpath = "k8s.io/component-base/featuregate", visibility = ["//visibility:public"], deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/naming:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/feature_gate.go b/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/feature_gate.go index 0243fb371a0..50e17622539 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/feature_gate.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/featuregate/feature_gate.go @@ -25,6 +25,8 @@ import ( "sync/atomic" "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/util/naming" "k8s.io/klog" ) @@ -38,17 +40,25 @@ const ( // AllAlpha=false,NewFeature=true will result in newFeature=true // AllAlpha=true,NewFeature=false will result in newFeature=false allAlphaGate Feature = "AllAlpha" + + // allBetaGate is a global toggle for beta features. Per-feature key + // values override the default set by allBetaGate. 
Examples: + // AllBeta=false,NewFeature=true will result in NewFeature=true + // AllBeta=true,NewFeature=false will result in NewFeature=false + allBetaGate Feature = "AllBeta" ) var ( // The generic features. defaultFeatures = map[Feature]FeatureSpec{ allAlphaGate: {Default: false, PreRelease: Alpha}, + allBetaGate: {Default: false, PreRelease: Beta}, } // Special handling for a few gates. specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ allAlphaGate: setUnsetAlphaGates, + allBetaGate: setUnsetBetaGates, } ) @@ -103,6 +113,8 @@ type MutableFeatureGate interface { // featureGate implements FeatureGate as well as pflag.Value for flag parsing. type featureGate struct { + featureGateName string + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) // lock guards writes to known, enabled, and reads/writes of closed @@ -125,9 +137,23 @@ func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, } } +func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Beta { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + // Set, String, and Type implement pflag.Value var _ pflag.Value = &featureGate{} +// internalPackages are packages that ignored when creating a name for featureGates. These packages are in the common +// call chains, so they'd be unhelpful as names. +var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"} + func NewFeatureGate() *featureGate { known := map[Feature]FeatureSpec{} for k, v := range defaultFeatures { @@ -142,9 +168,10 @@ func NewFeatureGate() *featureGate { enabledValue.Store(enabled) f := &featureGate{ - known: knownValue, - special: specialFeatures, - enabled: enabledValue, + featureGateName: naming.GetNameFromCallsite(internalPackages...), + known: knownValue, + special: specialFeatures, + enabled: enabledValue, } return f } @@ -263,12 +290,16 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { return nil } -// Enabled returns true if the key is enabled. +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. func (f *featureGate) Enabled(key Feature) bool { if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { return v } - return f.known.Load().(map[Feature]FeatureSpec)[key].Default + if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok { + return v.Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)) } // AddFlag adds a flag for setting global feature gates to the specified FlagSet. 
diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD index 55a4e605fe1..0d6a7701ae8 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD @@ -1,34 +1,34 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) -load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "collector.go", "counter.go", + "desc.go", "gauge.go", "histogram.go", + "http.go", + "labels.go", "metric.go", "opts.go", "processstarttime.go", "registry.go", "summary.go", + "value.go", "version.go", "version_parser.go", "wrappers.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/component-base/metrics", importpath = "k8s.io/component-base/metrics", + visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", "//vendor/github.com/prometheus/client_model/go:go_default_library", "//vendor/github.com/prometheus/procfs:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -38,9 +38,11 @@ go_library( go_test( name = "go_default_test", srcs = [ + "collector_test.go", "counter_test.go", "gauge_test.go", "histogram_test.go", + "opts_test.go", "registry_test.go", "summary_test.go", "version_parser_test.go", @@ -50,6 +52,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/testutil:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", "//vendor/github.com/prometheus/common/expfmt:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], @@ -68,8 +72,39 @@ filegroup( ":package-srcs", "//staging/src/k8s.io/component-base/metrics/legacyregistry:all-srcs", "//staging/src/k8s.io/component-base/metrics/prometheus/clientgo:all-srcs", + "//staging/src/k8s.io/component-base/metrics/prometheus/ratelimiter:all-srcs", "//staging/src/k8s.io/component-base/metrics/prometheus/restclient:all-srcs", + "//staging/src/k8s.io/component-base/metrics/prometheus/version:all-srcs", "//staging/src/k8s.io/component-base/metrics/prometheus/workqueue:all-srcs", + "//staging/src/k8s.io/component-base/metrics/testutil:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +# Packages allowed to import github.com/prometheus +package_group( + name = "prometheus_import_allow_list", + packages = [ + "//cluster/images/etcd-version-monitor", + "//pkg/controller/endpointslice", + "//pkg/controller/volume/attachdetach/metrics", + "//pkg/controller/volume/persistentvolume/metrics", + "//pkg/kubelet/metrics/collectors", + "//pkg/kubelet/pleg", + "//pkg/kubelet/pluginmanager/metrics", + "//pkg/kubelet/server", + "//pkg/kubelet/server/stats", + "//pkg/kubelet/volumemanager/metrics", + "//pkg/master", + 
"//pkg/scheduler/framework/v1alpha1", + "//pkg/volume/util/operationexecutor", + "//staging/src/k8s.io/apiserver/pkg/admission/metrics", + "//staging/src/k8s.io/apiserver/pkg/util/flowcontrol/metrics", + "//staging/src/k8s.io/component-base/metrics/...", + "//test/e2e", + "//test/e2e/storage", + "//test/e2e_node", + "//vendor/...", + ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/OWNERS b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/OWNERS index 6d2418a175c..46c94893ab5 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/OWNERS @@ -1,12 +1,9 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- piosz -- brancz +- sig-instrumentation-approvers - logicalhan reviewers: -- piosz -- brancz -- logicalhan +- sig-instrumentation-reviewers labels: - sig/instrumentation diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/collector.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/collector.go new file mode 100644 index 00000000000..f720249068c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/collector.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// StableCollector extends the prometheus.Collector interface to allow customization of the +// metric registration process, it's especially intend to be used in scenario of custom collector. +type StableCollector interface { + prometheus.Collector + + // DescribeWithStability sends the super-set of all possible metrics.Desc collected + // by this StableCollector to the provided channel. + DescribeWithStability(chan<- *Desc) + + // CollectWithStability sends each collected metrics.Metric via the provide channel. + CollectWithStability(chan<- Metric) + + // Create will initialize all Desc and it intends to be called by registry. + Create(version *semver.Version, self StableCollector) bool +} + +// BaseStableCollector which implements almost all of the methods defined by StableCollector +// is a convenient assistant for custom collectors. +// It is recommend that inherit BaseStableCollector when implementing custom collectors. +type BaseStableCollector struct { + descriptors []*Desc // stores all Desc collected from DescribeWithStability(). + registrable []*Desc // stores registrable Desc(not be hidden), is a subset of descriptors. + self StableCollector +} + +// DescribeWithStability sends all descriptors to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) { + panic(fmt.Errorf("custom collector should over-write DescribeWithStability method")) +} + +// Describe sends all descriptors to the provided channel. +// It intend to be called by prometheus registry. 
+func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) { + for _, d := range bsc.registrable { + ch <- d.toPrometheusDesc() + } +} + +// CollectWithStability sends all metrics to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) { + panic(fmt.Errorf("custom collector should over-write CollectWithStability method")) +} + +// Collect is called by the Prometheus registry when collecting metrics. +func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) { + mch := make(chan Metric) + + go func() { + bsc.self.CollectWithStability(mch) + close(mch) + }() + + for m := range mch { + // nil Metric usually means hidden metrics + if m == nil { + continue + } + + ch <- prometheus.Metric(m) + } +} + +func (bsc *BaseStableCollector) add(d *Desc) { + bsc.descriptors = append(bsc.descriptors, d) +} + +// Init intends to be called by registry. +func (bsc *BaseStableCollector) init(self StableCollector) { + bsc.self = self + + dch := make(chan *Desc) + + // collect all possible descriptions from custom side + go func() { + bsc.self.DescribeWithStability(dch) + close(dch) + }() + + for d := range dch { + bsc.add(d) + } +} + +// Create intends to be called by registry. +// Create will return true as long as there is one or more metrics not be hidden. +// Otherwise return false, that means the whole collector will be ignored by registry. +func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool { + bsc.init(self) + + for _, d := range bsc.descriptors { + if version != nil { + d.determineDeprecationStatus(*version) + } + + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + if d.IsHidden() { + // do nothing for hidden metrics + } else if d.IsDeprecated() { + d.initializeDeprecatedDesc() + bsc.registrable = append(bsc.registrable, d) + d.isCreated = true + } else { + d.initialize() + bsc.registrable = append(bsc.registrable, d) + d.isCreated = true + } + }) + } + + if len(bsc.registrable) > 0 { + return true + } + + return false +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/counter.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/counter.go index 17386ed5a84..d641b078299 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/counter.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/counter.go @@ -34,10 +34,8 @@ type Counter struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewCounter(opts *CounterOpts) *Counter { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + kc := &Counter{ CounterOpts: opts, lazyMetric: lazyMetric{}, @@ -86,10 +84,8 @@ type CounterVec struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. 
func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + cv := &CounterVec{ CounterVec: noopCounterVec, CounterOpts: opts, @@ -143,7 +139,7 @@ func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new Counter is created IFF the counterVec has // been registered to a metrics registry. -func (v *CounterVec) With(labels prometheus.Labels) CounterMetric { +func (v *CounterVec) With(labels map[string]string) CounterMetric { if !v.IsCreated() { return noop // return no-op counter } @@ -157,9 +153,18 @@ func (v *CounterVec) With(labels prometheus.Labels) CounterMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. -func (v *CounterVec) Delete(labels prometheus.Labels) bool { +func (v *CounterVec) Delete(labels map[string]string) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } return v.CounterVec.Delete(labels) } + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/desc.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 00000000000..2d4b0793ec1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,173 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "strings" + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. 
+ deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. +func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. +func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.help = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.help) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.help = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.help) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. 
Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + // remove stability from help if any + stabilityStr := fmt.Sprintf("[%v] ", d.stabilityLevel) + rawHelp := strings.Replace(d.help, stabilityStr, "", -1) + + return NewDesc(d.fqName, rawHelp, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/gauge.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/gauge.go index 82b982d9d6c..c9c187b7b00 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/gauge.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/gauge.go @@ -19,6 +19,8 @@ package metrics import ( "github.com/blang/semver" "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" ) // Gauge is our internal representation for our wrapping struct around prometheus @@ -34,10 +36,8 @@ type Gauge struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewGauge(opts *GaugeOpts) *Gauge { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + kc := &Gauge{ GaugeOpts: opts, lazyMetric: lazyMetric{}, @@ -86,10 +86,8 @@ type GaugeVec struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + cv := &GaugeVec{ GaugeVec: noopGaugeVec, GaugeOpts: opts, @@ -142,7 +140,7 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has // been registered to a metrics registry. -func (v *GaugeVec) With(labels prometheus.Labels) GaugeMetric { +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { if !v.IsCreated() { return noop // return no-op gauge } @@ -156,9 +154,40 @@ func (v *GaugeVec) With(labels prometheus.Labels) GaugeMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. -func (v *GaugeVec) Delete(labels prometheus.Labels) bool { +func (v *GaugeVec) Delete(labels map[string]string) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } return v.GaugeVec.Delete(labels) } + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(&opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. 
If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/histogram.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/histogram.go index 38d2d416477..1a4bbda813e 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/histogram.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/histogram.go @@ -21,6 +21,19 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +// DefBuckets is a wrapper for prometheus.DefBuckets +var DefBuckets = prometheus.DefBuckets + +// LinearBuckets is a wrapper for prometheus.LinearBuckets. +func LinearBuckets(start, width float64, count int) []float64 { + return prometheus.LinearBuckets(start, width, count) +} + +// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. +func ExponentialBuckets(start, factor float64, count int) []float64 { + return prometheus.ExponentialBuckets(start, factor, count) +} + // Histogram is our internal representation for our wrapping struct around prometheus // histograms. Summary implements both kubeCollector and ObserverMetric type Histogram struct { @@ -33,10 +46,8 @@ type Histogram struct { // NewHistogram returns an object which is Histogram-like. However, nothing // will be measured until the histogram is registered somewhere. func NewHistogram(opts *HistogramOpts) *Histogram { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + h := &Histogram{ HistogramOpts: opts, lazyMetric: lazyMetric{}, @@ -85,10 +96,8 @@ type HistogramVec struct { // prometheus.HistogramVec object. However, the object returned will not measure // anything unless the collector is first registered, since the metric is lazily instantiated. func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + v := &HistogramVec{ HistogramVec: noopHistogramVec, HistogramOpts: opts, @@ -137,7 +146,7 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has // been registered to a metrics registry. -func (v *HistogramVec) With(labels prometheus.Labels) ObserverMetric { +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { if !v.IsCreated() { return noop } @@ -151,9 +160,18 @@ func (v *HistogramVec) With(labels prometheus.Labels) ObserverMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. -func (v *HistogramVec) Delete(labels prometheus.Labels) bool { +func (v *HistogramVec) Delete(labels map[string]string) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } return v.HistogramVec.Delete(labels) } + +// Reset deletes all metrics in this vector. 
+func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/http.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 00000000000..ecf722e9144 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/labels.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 00000000000..11af3ae4249 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. +type Labels prometheus.Labels diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go index bc08e3c8904..e48a4a771f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -17,16 +17,18 @@ limitations under the License. package legacyregistry import ( + "net/http" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/component-base/metrics" - "net/http" ) var ( defaultRegistry = metrics.NewKubeRegistry() // DefaultGatherer exposes the global registry gatherer - DefaultGatherer prometheus.Gatherer = defaultRegistry + DefaultGatherer metrics.Gatherer = defaultRegistry ) func init() { @@ -41,24 +43,18 @@ func init() { // Deprecated: Please note the issues described in the doc comment of // InstrumentHandler. You might want to consider using promhttp.Handler instead. func Handler() http.Handler { - return prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})) + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})) } // Register registers a collectable metric but uses the global registry func Register(c metrics.Registerable) error { err := defaultRegistry.Register(c) - // sideload global prom registry as fallback - prometheus.Register(c) return err } // MustRegister registers registerable metrics but uses the global registry. func MustRegister(cs ...metrics.Registerable) { defaultRegistry.MustRegister(cs...) - // sideload global prom registry as fallback - for _, c := range cs { - prometheus.Register(c) - } } // RawMustRegister registers prometheus collectors but uses the global registry, this @@ -67,10 +63,6 @@ func MustRegister(cs ...metrics.Registerable) { // Deprecated func RawMustRegister(cs ...prometheus.Collector) { defaultRegistry.RawMustRegister(cs...) - // sideload global prom registry as fallback - for _, c := range cs { - prometheus.Register(c) - } } // RawRegister registers a prometheus collector but uses the global registry, this @@ -79,7 +71,24 @@ func RawMustRegister(cs ...prometheus.Collector) { // Deprecated func RawRegister(c prometheus.Collector) error { err := defaultRegistry.RawRegister(c) - // sideload global prom registry as fallback - prometheus.Register(c) return err } + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. 
+func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go index 0337829e911..10398e396b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go @@ -17,11 +17,13 @@ limitations under the License. package metrics import ( + "sync" + "github.com/blang/semver" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "k8s.io/klog" - "sync" ) /* @@ -99,8 +101,9 @@ func (r *lazyMetric) determineDeprecationStatus(version semver.Version) { klog.Warningf("Hidden metrics have been manually overridden, showing this very deprecated metric.") return } - if selfVersion.LT(version) { - klog.Warningf("This metric has been deprecated for more than one release, hiding.") + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") r.isHidden = true } }) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/opts.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/opts.go index 4fd1048b167..6b4a290d27d 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/opts.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/opts.go @@ -18,9 +18,10 @@ package metrics import ( "fmt" - "github.com/prometheus/client_golang/prometheus" "sync" "time" + + "github.com/prometheus/client_golang/prometheus" ) // KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure @@ -34,13 +35,24 @@ type KubeOpts struct { Subsystem string Name string Help string - ConstLabels prometheus.Labels + ConstLabels map[string]string DeprecatedVersion string deprecateOnce sync.Once annotateOnce sync.Once StabilityLevel StabilityLevel } +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + // StabilityLevel represents the API guarantees for a given defined metric. type StabilityLevel string @@ -53,6 +65,16 @@ const ( STABLE StabilityLevel = "STABLE" ) +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + // CounterOpts is an alias for Opts. See there for doc comments. 
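// A minimal sketch, not part of the vendored sources, of the opts behavior added
// above: BuildFQName joins name components with "_", and constructors such as
// NewHistogram now call StabilityLevel.setDefaults, so an unset level becomes
// ALPHA. The names used here are illustrative assumptions.
package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	// Empty components are skipped when joining with "_".
	fqName := metrics.BuildFQName("example", "server", "requests_total")
	fmt.Println(fqName) // example_server_requests_total

	opts := &metrics.HistogramOpts{
		Name:    fqName,
		Help:    "Illustrative histogram.",
		Buckets: metrics.DefBuckets,
		// StabilityLevel intentionally left empty.
	}
	_ = metrics.NewHistogram(opts)

	// setDefaults filled in the zero value during construction.
	fmt.Println(opts.StabilityLevel == metrics.ALPHA) // true
}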
type CounterOpts KubeOpts @@ -122,7 +144,7 @@ type HistogramOpts struct { Subsystem string Name string Help string - ConstLabels prometheus.Labels + ConstLabels map[string]string Buckets []float64 DeprecatedVersion string deprecateOnce sync.Once @@ -168,7 +190,7 @@ type SummaryOpts struct { Subsystem string Name string Help string - ConstLabels prometheus.Labels + ConstLabels map[string]string Objectives map[float64]float64 MaxAge time.Duration AgeBuckets uint32 @@ -194,16 +216,28 @@ func (o *SummaryOpts) annotateStabilityLevel() { }) } +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + // convenience function to allow easy transformation to the prometheus // counterpart. This will do more once we have a proper label abstraction func (o SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. + objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } return prometheus.SummaryOpts{ Namespace: o.Namespace, Subsystem: o.Subsystem, Name: o.Name, Help: o.Help, ConstLabels: o.ConstLabels, - Objectives: o.Objectives, + Objectives: objectives, MaxAge: o.MaxAge, AgeBuckets: o.AgeBuckets, BufCap: o.BufCap, diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/processstarttime.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/processstarttime.go index 70912798532..56c5a36d63e 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/processstarttime.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -17,11 +17,13 @@ limitations under the License. 
package metrics import ( + "os" + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" + "k8s.io/klog" - "os" - "time" ) var processStartTime = prometheus.NewGaugeVec( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/BUILD similarity index 57% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD rename to cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/BUILD index 40b04cbf373..4b43a0b0e1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/BUILD @@ -7,11 +7,13 @@ load( go_library( name = "go_default_library", - srcs = ["prometheus.go"], - importpath = "k8s.io/kubernetes/pkg/client/metrics/prometheus", + srcs = ["metrics.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/component-base/metrics/prometheus/restclient", + importpath = "k8s.io/component-base/metrics/prometheus/restclient", deps = [ "//staging/src/k8s.io/client-go/tools/metrics:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go similarity index 66% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go rename to cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go index c7804dfc2c1..ac0c9eb3319 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go @@ -14,43 +14,42 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package prometheus creates and registers prometheus metrics with -// rest clients. To use this package, you just have to import it. -package prometheus +package restclient import ( "net/url" "time" "k8s.io/client-go/tools/metrics" - - "github.com/prometheus/client_golang/prometheus" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) var ( // requestLatency is a Prometheus Summary metric type partitioned by // "verb" and "url" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + requestLatency = k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ Name: "rest_client_request_duration_seconds", Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + Buckets: k8smetrics.ExponentialBuckets(0.001, 2, 10), }, []string{"verb", "url"}, ) // deprecatedRequestLatency is deprecated, please use requestLatency. - deprecatedRequestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_latency_seconds", - Help: "(Deprecated) Request latency in seconds. 
Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + deprecatedRequestLatency = k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ + Name: "rest_client_request_latency_seconds", + Help: "Request latency in seconds. Broken down by verb and URL.", + Buckets: k8smetrics.ExponentialBuckets(0.001, 2, 10), + DeprecatedVersion: "1.14.0", }, []string{"verb", "url"}, ) - requestResult = prometheus.NewCounterVec( - prometheus.CounterOpts{ + requestResult = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Name: "rest_client_requests_total", Help: "Number of HTTP requests, partitioned by status code, method, and host.", }, @@ -59,15 +58,15 @@ var ( ) func init() { - prometheus.MustRegister(requestLatency) - prometheus.MustRegister(deprecatedRequestLatency) - prometheus.MustRegister(requestResult) + legacyregistry.MustRegister(requestLatency) + legacyregistry.MustRegister(deprecatedRequestLatency) + legacyregistry.MustRegister(requestResult) metrics.Register(&latencyAdapter{m: requestLatency, dm: deprecatedRequestLatency}, &resultAdapter{requestResult}) } type latencyAdapter struct { - m *prometheus.HistogramVec - dm *prometheus.HistogramVec + m *k8smetrics.HistogramVec + dm *k8smetrics.HistogramVec } func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { @@ -76,7 +75,7 @@ func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) } type resultAdapter struct { - m *prometheus.CounterVec + m *k8smetrics.CounterVec } func (r *resultAdapter) Increment(code, method, host string) { diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go index 701f151999b..fa1854bd69c 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go @@ -17,6 +17,7 @@ limitations under the License. package metrics import ( + "fmt" "sync" "sync/atomic" @@ -33,6 +34,44 @@ var ( showHidden atomic.Value ) +// shouldHide be used to check if a specific metric with deprecated version should be hidden +// according to metrics deprecation lifecycle. +func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool { + guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor)) + if err != nil { + panic("failed to make version from current version") + } + + if deprecatedVersion.LT(guardVersion) { + return true + } + + return false +} + +func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error { + if targetVersionStr == "" { + return nil + } + + validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1) + if targetVersionStr != validVersionStr { + return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr) + } + + return nil +} + +// ValidateShowHiddenMetricsVersion checks invalid version for which show hidden metrics. +func ValidateShowHiddenMetricsVersion(v string) []error { + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v) + if err != nil { + return []error{err} + } + + return nil +} + // SetShowHidden will enable showing hidden metrics. 
This will no-opt // after the initial call func SetShowHidden() { @@ -62,6 +101,8 @@ type KubeRegistry interface { RawRegister(prometheus.Collector) error // Deprecated RawMustRegister(...prometheus.Collector) + CustomRegister(c StableCollector) error + CustomMustRegister(cs ...StableCollector) Register(Registerable) error MustRegister(...Registerable) Unregister(Registerable) bool @@ -101,6 +142,29 @@ func (kr *kubeRegistry) MustRegister(cs ...Registerable) { kr.PromRegistry.MustRegister(metrics...) } +// CustomRegister registers a new custom collector. +func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + collectors = append(collectors, c) + } + } + + kr.PromRegistry.MustRegister(collectors...) +} + // RawRegister takes a native prometheus.Collector and registers the collector // to the registry. This bypasses metrics safety checks, so should only be used // to register custom prometheus collectors. @@ -148,14 +212,9 @@ func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry { return r } -func registerMetadataMetrics(r *kubeRegistry) { - RegisterBuildInfo(r) -} - // NewKubeRegistry creates a new vanilla Registry without any Collectors // pre-registered. func NewKubeRegistry() KubeRegistry { r := newKubeRegistry(version.Get()) - registerMetadataMetrics(r) return r } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/summary.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/summary.go index ff573782245..3d8da006459 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/summary.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/summary.go @@ -37,10 +37,8 @@ type Summary struct { // // DEPRECATED: as per the metrics overhaul KEP func NewSummary(opts *SummaryOpts) *Summary { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + s := &Summary{ SummaryOpts: opts, lazyMetric: lazyMetric{}, @@ -93,10 +91,8 @@ type SummaryVec struct { // // DEPRECATED: as per the metrics overhaul KEP func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { - // todo: handle defaulting better - if opts.StabilityLevel == "" { - opts.StabilityLevel = ALPHA - } + opts.StabilityLevel.setDefaults() + v := &SummaryVec{ SummaryOpts: opts, originalLabels: labels, @@ -144,7 +140,7 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new ObserverMetric is created IFF the summaryVec has // been registered to a metrics registry. -func (v *SummaryVec) With(labels prometheus.Labels) ObserverMetric { +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { if !v.IsCreated() { return noop } @@ -158,9 +154,18 @@ func (v *SummaryVec) With(labels prometheus.Labels) ObserverMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. 
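// An illustrative walk-through, not part of the vendored sources, of the
// deprecation rule that shouldHide above implements; the versions are example
// values, not taken from any release.
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// A metric with DeprecatedVersion "1.16.0" is hidden once the running
	// binary reports 1.17.x, because 1.16.0 < 1.17.0, the current
	// MAJOR.MINOR.0 "guard" version built by shouldHide.
	current := semver.MustParse("1.17.3")
	deprecated := semver.MustParse("1.16.0")
	guard := semver.MustParse(fmt.Sprintf("%d.%d.0", current.Major, current.Minor))
	fmt.Println(deprecated.LT(guard)) // true -> hide the metric

	// A metric deprecated in the binary's own minor (1.17.0 here) is still shown.
	fmt.Println(semver.MustParse("1.17.0").LT(guard)) // false -> keep showing it

	// ValidateShowHiddenMetricsVersion (above) then only lets operators re-show
	// hidden metrics for the previous minor, e.g. "1.16" on a 1.17 binary.
}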
-func (v *SummaryVec) Delete(labels prometheus.Labels) bool { +func (v *SummaryVec) Delete(labels map[string]string) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } return v.SummaryVec.Delete(labels) } + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go new file mode 100644 index 00000000000..4a19aaa3bf9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +func (vt *ValueType) toPromValueType() prometheus.ValueType { + return prometheus.ValueType(*vt) +} + +// NewLazyConstMetric is a helper of MustNewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + if desc.IsHidden() { + return nil + } + return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. +// +// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), +// otherwise, no stability guarantees would be offered. 
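// A hedged sketch, not part of the vendored sources, of how a custom collector
// might use the NewLazyConstMetric / NewLazyMetricWithTimestamp helpers above.
// The channel and desc parameters are assumed to come from a StableCollector's
// collect path; constructing the *metrics.Desc itself is not shown in this hunk.
package metricsexample

import (
	"time"

	"k8s.io/component-base/metrics"
)

// emitGaugeAt sends one constant gauge sample with an explicit timestamp,
// silently skipping it when the metric has been hidden by the deprecation
// lifecycle (NewLazyConstMetric returns nil for hidden descriptors).
func emitGaugeAt(ch chan<- metrics.Metric, desc *metrics.Desc, value float64, ts time.Time, labels ...string) {
	m := metrics.NewLazyConstMetric(desc, metrics.GaugeValue, value, labels...)
	if m == nil {
		return
	}
	ch <- metrics.NewLazyMetricWithTimestamp(ts, m)
}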
+func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { + if m == nil { + return nil + } + + return prometheus.NewMetricWithTimestamp(t, m) +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/version_parser.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/version_parser.go index d2f7aee437c..d52bd74bdfd 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/version_parser.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/version_parser.go @@ -18,9 +18,11 @@ package metrics import ( "fmt" + "regexp" + "github.com/blang/semver" + apimachineryversion "k8s.io/apimachinery/pkg/version" - "regexp" ) const ( diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/wrappers.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/wrappers.go index fa40314dc6e..6ae8a458acb 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/wrappers.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/wrappers.go @@ -18,7 +18,6 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" ) @@ -79,3 +78,18 @@ type PromRegistry interface { Unregister(prometheus.Collector) bool Gather() ([]*dto.MetricFamily, error) } + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/version/BUILD index c9aba34313a..a8a3fc75cd5 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/version/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/BUILD @@ -21,7 +21,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/component-base/version/verflag:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/base.go b/cluster-autoscaler/vendor/k8s.io/component-base/version/base.go index c84c6d2d1c5..e13678c305f 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/version/base.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/base.go @@ -32,7 +32,7 @@ package version // build/mark_new_version.sh to reflect the new version, and then a // git annotated tag (using format vX.Y where X == Major version and Y // == Minor version) is created to point to the commit that updates -// pkg/version/base.go +// component-base/version/base.go var ( // TODO: Deprecate gitMajor and gitMinor, use only gitVersion // instead. 
First step in deprecation, keep the fields but make diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/def.bzl b/cluster-autoscaler/vendor/k8s.io/component-base/version/def.bzl index ac7571b28f1..77edcbc89bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/version/def.bzl +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/def.bzl @@ -16,8 +16,8 @@ def version_x_defs(): # This should match the list of packages in kube::version::ldflag stamp_pkgs = [ - "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/vendor/k8s.io/component-base/version", + "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", ] # This should match the list of vars in kube::version::ldflags diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/BUILD similarity index 63% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD rename to cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/BUILD index 5d725336ec2..ca3c2b0f5ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/BUILD @@ -4,13 +4,15 @@ load( "@io_bazel_rules_go//go:def.bzl", "go_library", ) +load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs") go_library( name = "go_default_library", srcs = ["verflag.go"], - importpath = "k8s.io/kubernetes/pkg/version/verflag", + importmap = "k8s.io/kubernetes/vendor/k8s.io/component-base/version/verflag", + importpath = "k8s.io/component-base/version/verflag", deps = [ - "//pkg/version:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/verflag.go b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go similarity index 98% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/verflag.go rename to cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go index 18c3ac44247..f27e101caa0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/verflag/verflag.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go @@ -25,7 +25,7 @@ import ( flag "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/version" + "k8s.io/component-base/version" ) type versionValue int diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/BUILD b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/BUILD index 64333e4fba2..cd93d78f9f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/BUILD @@ -17,7 +17,12 @@ go_test( name = "go_default_test", srcs = ["translate_test.go"], embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", + ], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.mod b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.mod index c2d584fa7f9..05f679c29e6 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.mod +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.mod @@ -11,13 +11,8 @@ require ( ) replace ( - golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 - golang.org/x/lint => golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a - golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 - golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db - golang.org/x/time => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d + golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 + golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum index 6bb3ab974af..2cb7a51b2a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum @@ -37,8 +37,9 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -47,18 +48,19 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -79,25 +81,31 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -golang.org/x/crypto 
v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -105,20 +113,27 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= 
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -132,22 +147,24 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/BUILD b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/BUILD index befec190443..d3125111b6f 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/BUILD @@ -43,6 +43,7 @@ go_test( "azure_disk_test.go", "azure_file_test.go", "gce_pd_test.go", + "in_tree_volume_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go index 573214032dd..5db1e921f8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go @@ -23,7 +23,7 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,6 +33,8 @@ const ( AWSEBSDriverName = "ebs.csi.aws.com" // AWSEBSInTreePluginName is the name of the intree plugin for EBS AWSEBSInTreePluginName = "kubernetes.io/aws-ebs" + // AWSEBSTopologyKey is the zonal topology key for AWS EBS CSI driver + AWSEBSTopologyKey = "topology." 
+ AWSEBSDriverName + "/zone" ) var _ InTreePlugin = &awsElasticBlockStoreCSITranslator{} @@ -45,8 +47,39 @@ func NewAWSElasticBlockStoreCSITranslator() InTreePlugin { return &awsElasticBlockStoreCSITranslator{} } -// TranslateInTreeStorageClassParametersToCSI translates InTree EBS storage class parameters to CSI storage class +// TranslateInTreeStorageClassToCSI translates InTree EBS storage class parameters to CSI storage class func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) { + var ( + generatedTopologies []v1.TopologySelectorTerm + params = map[string]string{} + ) + for k, v := range sc.Parameters { + switch strings.ToLower(k) { + case fsTypeKey: + params[csiFsTypeKey] = v + case zoneKey: + generatedTopologies = generateToplogySelectors(AWSEBSTopologyKey, []string{v}) + case zonesKey: + generatedTopologies = generateToplogySelectors(AWSEBSTopologyKey, strings.Split(v, ",")) + default: + params[k] = v + } + } + + if len(generatedTopologies) > 0 && len(sc.AllowedTopologies) > 0 { + return nil, fmt.Errorf("cannot simultaneously set allowed topologies and zone/zones parameters") + } else if len(generatedTopologies) > 0 { + sc.AllowedTopologies = generatedTopologies + } else if len(sc.AllowedTopologies) > 0 { + newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, AWSEBSTopologyKey) + if err != nil { + return nil, fmt.Errorf("failed translating allowed topologies: %v", err) + } + sc.AllowedTopologies = newTopologies + } + + sc.Parameters = params + return sc, nil } @@ -57,16 +90,21 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(vol return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume") } ebsSource := volume.AWSElasticBlockStore + volumeHandle, err := KubernetesVolumeIDToEBSVolumeID(ebsSource.VolumeID) + if err != nil { + return nil, fmt.Errorf("failed to translate Kubernetes ID to EBS Volume ID %v", err) + } pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - // A.K.A InnerVolumeSpecName required to match for Unmount - Name: volume.Name, + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, ebsSource.VolumeID), }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ Driver: AWSEBSDriverName, - VolumeHandle: ebsSource.VolumeID, + VolumeHandle: volumeHandle, ReadOnly: ebsSource.ReadOnly, FSType: ebsSource.FSType, VolumeAttributes: map[string]string{ @@ -104,6 +142,10 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreePVToCSI(pv *v1.Persis }, } + if err := translateTopology(pv, AWSEBSTopologyKey); err != nil { + return nil, fmt.Errorf("failed to translate topology: %v", err) + } + pv.Spec.AWSElasticBlockStore = nil pv.Spec.CSI = csiSource return pv, nil @@ -161,6 +203,10 @@ func (t *awsElasticBlockStoreCSITranslator) GetCSIPluginName() string { return AWSEBSDriverName } +func (t *awsElasticBlockStoreCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) { + return volumeHandle, nil +} + // awsVolumeRegMatch represents Regex Match for AWS volume. 
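// A usage sketch, not part of the vendored sources, for the AWS EBS StorageClass
// translation added above. The zone values and parameters are illustrative.
package main

import (
	"fmt"

	storage "k8s.io/api/storage/v1"
	"k8s.io/csi-translation-lib/plugins"
)

func main() {
	t := plugins.NewAWSElasticBlockStoreCSITranslator()

	sc := &storage.StorageClass{
		Parameters: map[string]string{
			"fstype": "ext4",
			"zones":  "us-east-1a,us-east-1b",
		},
	}

	csiSC, err := t.TranslateInTreeStorageClassToCSI(sc)
	if err != nil {
		panic(err)
	}

	// "fstype" is rewritten to "csi.storage.k8s.io/fstype", and the deprecated
	// "zones" parameter becomes AllowedTopologies keyed by
	// "topology.ebs.csi.aws.com/zone" (AWSEBSTopologyKey).
	fmt.Println(csiSC.Parameters)
	fmt.Println(csiSC.AllowedTopologies)
}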
var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$") diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go index b1e91bdef9c..c9e8472c856 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -70,27 +70,30 @@ func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol azureSource := volume.AzureDisk pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - // A.K.A InnerVolumeSpecName required to match for Unmount - Name: volume.Name, + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", AzureDiskDriverName, azureSource.DiskName), }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ Driver: AzureDiskDriverName, VolumeHandle: azureSource.DataDiskURI, - ReadOnly: *azureSource.ReadOnly, - FSType: *azureSource.FSType, VolumeAttributes: map[string]string{azureDiskKind: "Managed"}, }, }, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, }, } + if azureSource.ReadOnly != nil { + pv.Spec.PersistentVolumeSource.CSI.ReadOnly = *azureSource.ReadOnly + } - if *azureSource.CachingMode != "" { + if azureSource.CachingMode != nil && *azureSource.CachingMode != "" { pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode) } - if *azureSource.FSType != "" { + if azureSource.FSType != nil { + pv.Spec.PersistentVolumeSource.CSI.FSType = *azureSource.FSType pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[azureDiskFSType] = *azureSource.FSType } if azureSource.Kind != nil { @@ -207,6 +210,10 @@ func (t *azureDiskCSITranslator) GetCSIPluginName() string { return AzureDiskDriverName } +func (t *azureDiskCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) { + return volumeHandle, nil +} + func isManagedDisk(diskURI string) bool { if len(diskURI) > 4 && strings.ToLower(diskURI[:4]) == "http" { return false diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go index b78a4f83fda..79045421451 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -65,8 +65,9 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - // A.K.A InnerVolumeSpecName required to match for Unmount - Name: volume.Name, + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName), }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ @@ -176,6 +177,10 @@ func (t *azureFileCSITranslator) GetCSIPluginName() string { return AzureFileDriverName } +func (t *azureFileCSITranslator) 
RepairVolumeHandle(volumeHandle, nodeID string) (string, error) { + return volumeHandle, nil +} + // get file share info according to volume id, e.g. // input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41" // output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41 diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go index ed9d079b5eb..95677562d1c 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -41,9 +41,14 @@ const ( // "projects/{projectName}/zones/{zoneName}/disks/{diskName}" volIDZonalFmt = "projects/%s/zones/%s/disks/%s" // "projects/{projectName}/regions/{regionName}/disks/{diskName}" - volIDRegionalFmt = "projects/%s/regions/%s/disks/%s" - volIDDiskNameValue = 5 - volIDTotalElements = 6 + volIDRegionalFmt = "projects/%s/regions/%s/disks/%s" + volIDProjectValue = 1 + volIDRegionalityValue = 2 + volIDZoneValue = 3 + volIDDiskNameValue = 5 + volIDTotalElements = 6 + + nodeIDFmt = "projects/%s/zones/%s/instances/%s" // UnspecifiedValue is used for an unknown zone string UnspecifiedValue = "UNSPECIFIED" @@ -60,33 +65,6 @@ func NewGCEPersistentDiskCSITranslator() InTreePlugin { return &gcePersistentDiskCSITranslator{} } -func translateAllowedTopologies(terms []v1.TopologySelectorTerm) ([]v1.TopologySelectorTerm, error) { - if terms == nil { - return nil, nil - } - - newTopologies := []v1.TopologySelectorTerm{} - for _, term := range terms { - newTerm := v1.TopologySelectorTerm{} - for _, exp := range term.MatchLabelExpressions { - var newExp v1.TopologySelectorLabelRequirement - if exp.Key == v1.LabelZoneFailureDomain { - newExp = v1.TopologySelectorLabelRequirement{ - Key: GCEPDTopologyKey, - Values: exp.Values, - } - } else if exp.Key == GCEPDTopologyKey { - newExp = exp - } else { - return nil, fmt.Errorf("unknown topology key: %v", exp.Key) - } - newTerm.MatchLabelExpressions = append(newTerm.MatchLabelExpressions, newExp) - } - newTopologies = append(newTopologies, newTerm) - } - return newTopologies, nil -} - func generateToplogySelectors(key string, values []string) []v1.TopologySelectorTerm { return []v1.TopologySelectorTerm{ { @@ -107,13 +85,13 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassToCSI(sc *st np := map[string]string{} for k, v := range sc.Parameters { switch strings.ToLower(k) { - case "fstype": + case fsTypeKey: // prefixed fstype parameter is stripped out by external provisioner - np["csi.storage.k8s.io/fstype"] = v + np[csiFsTypeKey] = v // Strip out zone and zones parameters and translate them into topologies instead - case "zone": + case zoneKey: generatedTopologies = generateToplogySelectors(GCEPDTopologyKey, []string{v}) - case "zones": + case zonesKey: generatedTopologies = generateToplogySelectors(GCEPDTopologyKey, strings.Split(v, ",")) default: np[k] = v @@ -125,7 +103,7 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassToCSI(sc *st } else if len(generatedTopologies) > 0 { sc.AllowedTopologies = generatedTopologies } else if len(sc.AllowedTopologies) > 0 { - newTopologies, err := 
translateAllowedTopologies(sc.AllowedTopologies) + newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, GCEPDTopologyKey) if err != nil { return nil, fmt.Errorf("failed translating allowed topologies: %v", err) } @@ -197,10 +175,19 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume partition = strconv.Itoa(int(pdSource.Partition)) } - pv := &v1.PersistentVolume{ + var am v1.PersistentVolumeAccessMode + if pdSource.ReadOnly { + am = v1.ReadOnlyMany + } else { + am = v1.ReadWriteOnce + } + + fsMode := v1.PersistentVolumeFilesystem + return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - // A.K.A InnerVolumeSpecName required to match for Unmount - Name: volume.Name, + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", GCEPDDriverName, pdSource.PDName), }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ @@ -214,10 +201,10 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume }, }, }, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + AccessModes: []v1.PersistentVolumeAccessMode{am}, + VolumeMode: &fsMode, }, - } - return pv, nil + }, nil } // TranslateInTreePVToCSI takes a PV with GCEPersistentDisk set from in-tree @@ -263,6 +250,10 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.Persisten }, } + if err := translateTopology(pv, GCEPDTopologyKey); err != nil { + return nil, fmt.Errorf("failed to translate topology: %v", err) + } + pv.Spec.PersistentVolumeSource.GCEPersistentDisk = nil pv.Spec.PersistentVolumeSource.CSI = csiSource pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes) @@ -328,10 +319,53 @@ func (g *gcePersistentDiskCSITranslator) GetCSIPluginName() string { return GCEPDDriverName } +// RepairVolumeHandle returns a fully specified volume handle by inferring +// project, zone/region from the node ID if the volume handle has UNSPECIFIED +// sections +func (g *gcePersistentDiskCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) { + var err error + tok := strings.Split(volumeHandle, "/") + if len(tok) < volIDTotalElements { + return "", fmt.Errorf("volume handle has wrong number of elements; got %v, wanted %v or more", len(tok), volIDTotalElements) + } + if tok[volIDProjectValue] != UnspecifiedValue { + return volumeHandle, nil + } + + nodeTok := strings.Split(nodeID, "/") + if len(nodeTok) < volIDTotalElements { + return "", fmt.Errorf("node handle has wrong number of elements; got %v, wanted %v or more", len(nodeTok), volIDTotalElements) + } + + switch tok[volIDRegionalityValue] { + case "zones": + zone := "" + if tok[volIDZoneValue] == UnspecifiedValue { + zone = nodeTok[volIDZoneValue] + } else { + zone = tok[volIDZoneValue] + } + return fmt.Sprintf(volIDZonalFmt, nodeTok[volIDProjectValue], zone, tok[volIDDiskNameValue]), nil + case "regions": + region := "" + if tok[volIDZoneValue] == UnspecifiedValue { + region, err = getRegionFromZones([]string{nodeTok[volIDZoneValue]}) + if err != nil { + return "", fmt.Errorf("failed to get region from zone %s: %v", nodeTok[volIDZoneValue], err) + } + } else { + region = tok[volIDZoneValue] + } + return fmt.Sprintf(volIDRegionalFmt, nodeTok[volIDProjectValue], region, tok[volIDDiskNameValue]), nil + default: + return "", fmt.Errorf("expected volume handle to have zones or regions regionality value, got: %s", tok[volIDRegionalityValue]) + } +} + func pdNameFromVolumeID(id 
string) (string, error) { splitID := strings.Split(id, "/") - if len(splitID) != volIDTotalElements { - return "", fmt.Errorf("failed to get id components. Expected projects/{project}/zones/{zone}/disks/{name}. Got: %s", id) + if len(splitID) < volIDTotalElements { + return "", fmt.Errorf("failed to get id components.Got: %v, wanted %v components or more. ", len(splitID), volIDTotalElements) } return splitID[volIDDiskNameValue], nil } diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/in_tree_volume.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/in_tree_volume.go index 2095084d82b..eda5c10c27a 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/in_tree_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/in_tree_volume.go @@ -17,8 +17,14 @@ limitations under the License. package plugins import ( - "k8s.io/api/core/v1" + "errors" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/util/sets" + cloudvolume "k8s.io/cloud-provider/volume" ) // InTreePlugin handles translations between CSI and in-tree sources in a PV @@ -55,4 +61,136 @@ type InTreePlugin interface { // GetCSIPluginName returns the name of the CSI plugin that supersedes the in-tree plugin GetCSIPluginName() string + + // RepairVolumeHandle generates a correct volume handle based on node ID information. + RepairVolumeHandle(volumeHandle, nodeID string) (string, error) +} + +const ( + // fsTypeKey is the deprecated storage class parameter key for fstype + fsTypeKey = "fstype" + // csiFsTypeKey is the storage class parameter key for CSI fstype + csiFsTypeKey = "csi.storage.k8s.io/fstype" + // zoneKey is the deprecated storage class parameter key for zone + zoneKey = "zone" + // zonesKey is the deprecated storage class parameter key for zones + zonesKey = "zones" +) + +// replaceTopology overwrites an existing topology key by a new one. +func replaceTopology(pv *v1.PersistentVolume, oldKey, newKey string) error { + for i := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { + for j, r := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[i].MatchExpressions { + if r.Key == oldKey { + pv.Spec.NodeAffinity.Required.NodeSelectorTerms[i].MatchExpressions[j].Key = newKey + } + } + } + return nil +} + +// getTopologyZones returns all topology zones with the given key found in the PV. +func getTopologyZones(pv *v1.PersistentVolume, key string) []string { + if pv.Spec.NodeAffinity == nil || + pv.Spec.NodeAffinity.Required == nil || + len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) < 1 { + return nil + } + + var values []string + for i := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { + for _, r := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[i].MatchExpressions { + if r.Key == key { + values = append(values, r.Values...) + } + } + } + return values +} + +// addTopology appends the topology to the given PV. 
+func addTopology(pv *v1.PersistentVolume, topologyKey string, zones []string) error { + // Make sure there are no duplicate or empty strings + filteredZones := sets.String{} + for i := range zones { + zone := strings.TrimSpace(zones[i]) + if len(zone) > 0 { + filteredZones.Insert(zone) + } + } + + zones = filteredZones.UnsortedList() + if len(zones) < 1 { + return errors.New("there are no valid zones to add to pv") + } + + // Make sure the necessary fields exist + pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity) + pv.Spec.NodeAffinity.Required = new(v1.NodeSelector) + pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1) + + topology := v1.NodeSelectorRequirement{ + Key: topologyKey, + Operator: v1.NodeSelectorOpIn, + Values: zones, + } + + pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = append( + pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions, + topology, + ) + + return nil +} + +// translateTopology converts existing zone labels or in-tree topology to CSI topology. +// In-tree topology has precedence over zone labels. +func translateTopology(pv *v1.PersistentVolume, topologyKey string) error { + // If topology is already set, assume the content is accurate + if len(getTopologyZones(pv, topologyKey)) > 0 { + return nil + } + + zones := getTopologyZones(pv, v1.LabelZoneFailureDomain) + if len(zones) > 0 { + return replaceTopology(pv, v1.LabelZoneFailureDomain, topologyKey) + } + + if label, ok := pv.Labels[v1.LabelZoneFailureDomain]; ok { + zones = strings.Split(label, cloudvolume.LabelMultiZoneDelimiter) + if len(zones) > 0 { + return addTopology(pv, topologyKey, zones) + } + } + + return nil +} + +// translateAllowedTopologies translates allowed topologies within storage class +// from legacy failure domain to given CSI topology key +func translateAllowedTopologies(terms []v1.TopologySelectorTerm, key string) ([]v1.TopologySelectorTerm, error) { + if terms == nil { + return nil, nil + } + + newTopologies := []v1.TopologySelectorTerm{} + for _, term := range terms { + newTerm := v1.TopologySelectorTerm{} + for _, exp := range term.MatchLabelExpressions { + var newExp v1.TopologySelectorLabelRequirement + if exp.Key == v1.LabelZoneFailureDomain { + newExp = v1.TopologySelectorLabelRequirement{ + Key: key, + Values: exp.Values, + } + } else if exp.Key == key { + newExp = exp + } else { + return nil, fmt.Errorf("unknown topology key: %v", exp.Key) + } + newTerm.MatchLabelExpressions = append(newTerm.MatchLabelExpressions, newExp) + } + newTopologies = append(newTopologies, newTerm) + } + return newTopologies, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go index 9965eacf596..f3e21bc760a 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go @@ -19,7 +19,7 @@ package plugins import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,6 +27,8 @@ import ( const ( // CinderDriverName is the name of the CSI driver for Cinder CinderDriverName = "cinder.csi.openstack.org" + // CinderTopologyKey is the zonal topology key for Cinder CSI Driver + CinderTopologyKey = "topology.cinder.csi.openstack.org/zone" // CinderInTreePluginName is the name of the intree plugin for Cinder 
CinderInTreePluginName = "kubernetes.io/cinder" ) @@ -56,8 +58,9 @@ func (t *osCinderCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volu cinderSource := volume.Cinder pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - // A.K.A InnerVolumeSpecName required to match for Unmount - Name: volume.Name, + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", CinderDriverName, cinderSource.VolumeID), }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ @@ -92,6 +95,10 @@ func (t *osCinderCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) VolumeAttributes: map[string]string{}, } + if err := translateTopology(pv, CinderTopologyKey); err != nil { + return nil, fmt.Errorf("failed to translate topology: %v", err) + } + pv.Spec.Cinder = nil pv.Spec.CSI = csiSource return pv, nil @@ -140,3 +147,7 @@ func (t *osCinderCSITranslator) GetInTreePluginName() string { func (t *osCinderCSITranslator) GetCSIPluginName() string { return CinderDriverName } + +func (t *osCinderCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) { + return volumeHandle, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/translate.go b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/translate.go index dbe2e34a125..668bc872af6 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/translate.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/translate.go @@ -20,7 +20,7 @@ import ( "errors" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" "k8s.io/csi-translation-lib/plugins" ) @@ -35,9 +35,20 @@ var ( } ) +// CSITranslator translates in-tree storage API objects to their equivalent CSI +// API objects. It also provides many helper functions to determine whether +// translation logic exists and the mappings between "in-tree plugin <-> csi driver" +type CSITranslator struct{} + +// New creates a new CSITranslator which does real translation +// for "in-tree plugins <-> csi drivers" +func New() CSITranslator { + return CSITranslator{} +} + // TranslateInTreeStorageClassToCSI takes in-tree Storage Class // and translates it to a set of parameters consumable by CSI plugin -func TranslateInTreeStorageClassToCSI(inTreePluginName string, sc *storage.StorageClass) (*storage.StorageClass, error) { +func (CSITranslator) TranslateInTreeStorageClassToCSI(inTreePluginName string, sc *storage.StorageClass) (*storage.StorageClass, error) { newSC := sc.DeepCopy() for _, curPlugin := range inTreePlugins { if inTreePluginName == curPlugin.GetInTreePluginName() { @@ -50,13 +61,26 @@ func TranslateInTreeStorageClassToCSI(inTreePluginName string, sc *storage.Stora // TranslateInTreeInlineVolumeToCSI takes a inline volume and will translate // the in-tree volume source to a CSIPersistentVolumeSource (wrapped in a PV) // if the translation logic has been implemented. 
-func TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) { +func (CSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) { if volume == nil { return nil, fmt.Errorf("persistent volume was nil") } for _, curPlugin := range inTreePlugins { if curPlugin.CanSupportInline(volume) { - return curPlugin.TranslateInTreeInlineVolumeToCSI(volume) + pv, err := curPlugin.TranslateInTreeInlineVolumeToCSI(volume) + if err != nil { + return nil, err + } + // Inline volumes only support PersistentVolumeFilesystem (and not block). + // If VolumeMode has not been set explicitly by plugin-specific + // translator, set it to Filesystem here. + // This is only necessary for inline volumes as the default PV + // initialization that populates VolumeMode does not apply to inline volumes. + if pv.Spec.VolumeMode == nil { + volumeMode := v1.PersistentVolumeFilesystem + pv.Spec.VolumeMode = &volumeMode + } + return pv, nil } } return nil, fmt.Errorf("could not find in-tree plugin translation logic for %#v", volume.Name) @@ -66,7 +90,7 @@ func TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, // the in-tree source to a CSI Source if the translation logic // has been implemented. The input persistent volume will not // be modified -func TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { +func (CSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { if pv == nil { return nil, errors.New("persistent volume was nil") } @@ -82,7 +106,7 @@ func TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, erro // TranslateCSIPVToInTree takes a PV with a CSI PersistentVolume Source and will translate // it to a in-tree Persistent Volume Source for the specific in-tree volume specified // by the `Driver` field in the CSI Source. The input PV object will not be modified. 
-func TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { +func (CSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { if pv == nil || pv.Spec.CSI == nil { return nil, errors.New("CSI persistent volume was nil") } @@ -97,7 +121,7 @@ func TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, erro // IsMigratableIntreePluginByName tests whether there is migration logic for the in-tree plugin // whose name matches the given name -func IsMigratableIntreePluginByName(inTreePluginName string) bool { +func (CSITranslator) IsMigratableIntreePluginByName(inTreePluginName string) bool { for _, curPlugin := range inTreePlugins { if curPlugin.GetInTreePluginName() == inTreePluginName { return true @@ -108,7 +132,7 @@ func IsMigratableIntreePluginByName(inTreePluginName string) bool { // IsMigratedCSIDriverByName tests whether there exists an in-tree plugin with logic // to migrate to the CSI driver with given name -func IsMigratedCSIDriverByName(csiPluginName string) bool { +func (CSITranslator) IsMigratedCSIDriverByName(csiPluginName string) bool { if _, ok := inTreePlugins[csiPluginName]; ok { return true } @@ -116,7 +140,7 @@ func IsMigratedCSIDriverByName(csiPluginName string) bool { } // GetInTreePluginNameFromSpec returns the plugin name -func GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) { +func (CSITranslator) GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) { if pv != nil { for _, curPlugin := range inTreePlugins { if curPlugin.CanSupport(pv) { @@ -138,7 +162,7 @@ func GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (strin // GetCSINameFromInTreeName returns the name of a CSI driver that supersedes the // in-tree plugin with the given name -func GetCSINameFromInTreeName(pluginName string) (string, error) { +func (CSITranslator) GetCSINameFromInTreeName(pluginName string) (string, error) { for csiDriverName, curPlugin := range inTreePlugins { if curPlugin.GetInTreePluginName() == pluginName { return csiDriverName, nil @@ -149,7 +173,7 @@ func GetCSINameFromInTreeName(pluginName string) (string, error) { // GetInTreeNameFromCSIName returns the name of the in-tree plugin superseded by // a CSI driver with the given name -func GetInTreeNameFromCSIName(pluginName string) (string, error) { +func (CSITranslator) GetInTreeNameFromCSIName(pluginName string) (string, error) { if plugin, ok := inTreePlugins[pluginName]; ok { return plugin.GetInTreePluginName(), nil } @@ -157,7 +181,7 @@ func GetInTreeNameFromCSIName(pluginName string) (string, error) { } // IsPVMigratable tests whether there is migration logic for the given Persistent Volume -func IsPVMigratable(pv *v1.PersistentVolume) bool { +func (CSITranslator) IsPVMigratable(pv *v1.PersistentVolume) bool { for _, curPlugin := range inTreePlugins { if curPlugin.CanSupport(pv) { return true @@ -167,7 +191,7 @@ func IsPVMigratable(pv *v1.PersistentVolume) bool { } // IsInlineMigratable tests whether there is Migration logic for the given Inline Volume -func IsInlineMigratable(vol *v1.Volume) bool { +func (CSITranslator) IsInlineMigratable(vol *v1.Volume) bool { for _, curPlugin := range inTreePlugins { if curPlugin.CanSupportInline(vol) { return true @@ -175,3 +199,11 @@ func IsInlineMigratable(vol *v1.Volume) bool { } return false } + +// RepairVolumeHandle generates a correct volume handle based on node ID information. 
+func (CSITranslator) RepairVolumeHandle(driverName, volumeHandle, nodeID string) (string, error) { + if plugin, ok := inTreePlugins[driverName]; ok { + return plugin.RepairVolumeHandle(volumeHandle, nodeID) + } + return "", fmt.Errorf("could not find In-Tree driver name for CSI plugin %v", driverName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/klog/klog.go b/cluster-autoscaler/vendor/k8s.io/klog/klog.go index 2520ebdaa74..2712ce0afc1 100644 --- a/cluster-autoscaler/vendor/k8s.io/klog/klog.go +++ b/cluster-autoscaler/vendor/k8s.io/klog/klog.go @@ -142,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -226,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -294,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -396,31 +396,23 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -var initDefaultsOnce sync.Once - // InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { - - // Initialize defaults. - initDefaultsOnce.Do(func() { - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - }) - if flagset == nil { flagset = flag.CommandLine } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/LICENSE b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
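[Editor's note, not part of the patch] For context on the k8s.io/csi-translation-lib change earlier in this patch: the former package-level translation functions become methods on a new CSITranslator struct obtained from New(), and plugins additionally gain a RepairVolumeHandle method. The following is a minimal, illustrative Go sketch of how a caller might adopt the new API; the package name "example", the import alias "csitranslation", and the translateExample function are assumptions for illustration only and do not appear in the vendored code.

// Illustrative only: exercises the struct-based API introduced by this patch.
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	csitranslation "k8s.io/csi-translation-lib"
)

func translateExample(pv *v1.PersistentVolume) error {
	// New() returns a CSITranslator; the former package-level helpers
	// (IsPVMigratable, TranslateInTreePVToCSI, ...) are now its methods.
	translator := csitranslation.New()

	if !translator.IsPVMigratable(pv) {
		return fmt.Errorf("no CSI translation available for PV %s", pv.Name)
	}

	csiPV, err := translator.TranslateInTreePVToCSI(pv)
	if err != nil {
		return fmt.Errorf("failed to translate PV %s: %v", pv.Name, err)
	}
	fmt.Printf("PV %s now uses CSI driver %s\n", csiPV.Name, csiPV.Spec.CSI.Driver)
	return nil
}

As the diff above also shows, the same translator exposes RepairVolumeHandle(driverName, volumeHandle, nodeID), which the GCE PD plugin uses to fill in UNSPECIFIED project/zone segments of a volume handle from the node ID.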
diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/BUILD new file mode 100644 index 00000000000..d52c7edd0e8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/kube-scheduler/config/v1", + importpath = "k8s.io/kube-scheduler/config/v1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/doc.go similarity index 83% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go rename to cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/doc.go index c768a8c92cf..a94d3193909 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io -// Package api contains scheduler API objects. -package api // import "k8s.io/kubernetes/pkg/scheduler/api" +package v1 // import "k8s.io/kube-scheduler/config/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/register.go similarity index 56% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go rename to cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/register.go index 4852cd559e5..d5fcf8315a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/register.go @@ -14,42 +14,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -// TODO: remove this, scheduler should not have its own scheme. -var Scheme = runtime.NewScheme() +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" // SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "scheduler" group -var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} var ( - // SchemeBuilder defines a SchemeBuilder object. 
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is used to add stored functions to scheme. + // AddToScheme is a global function that registers this API group & version to a scheme AddToScheme = SchemeBuilder.AddToScheme ) -func init() { - if err := addKnownTypes(Scheme); err != nil { - // Programmer error. - panic(err) - } -} - +// addKnownTypes registers known types to the given scheme func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } scheme.AddKnownTypes(SchemeGroupVersion, &Policy{}, ) + // also register into the v1 group for API backward compatibility + scheme.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &Policy{}) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/types.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go similarity index 69% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/types.go rename to cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go index f7a67a11067..a7dbdbb97f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go @@ -20,9 +20,7 @@ import ( gojson "encoding/json" "time" - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -35,11 +33,11 @@ type Policy struct { // Holds the information to configure the priority functions Priorities []PriorityPolicy `json:"priorities"` // Holds the information to communicate with the extender(s) - ExtenderConfigs []ExtenderConfig `json:"extenders"` + Extenders []Extender `json:"extenders"` // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"` // When AlwaysCheckAllPredicates is set to true, scheduler checks all // the configured predicates even after one or more of them fails. @@ -66,7 +64,7 @@ type PriorityPolicy struct { Name string `json:"name"` // The numeric multiplier for the node scores that the priority function generates // The weight should be non-zero and can be a positive or a negative integer - Weight int `json:"weight"` + Weight int64 `json:"weight"` // Holds the parameters to configure the given priority function Argument *PriorityArgument `json:"argument"` } @@ -130,31 +128,31 @@ type LabelPreference struct { // RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. type RequestedToCapacityRatioArguments struct { // Array of point defining priority function shape. - UtilizationShape []UtilizationShapePoint `json:"shape"` - Resources []ResourceSpec `json:"resources,omitempty"` + Shape []UtilizationShapePoint `json:"shape"` + Resources []ResourceSpec `json:"resources,omitempty"` } // UtilizationShapePoint represents single point of priority function shape. 
type UtilizationShapePoint struct { // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int `json:"utilization"` + Utilization int32 `json:"utilization"` // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int `json:"score"` + Score int32 `json:"score"` } // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. type ResourceSpec struct { // Name of the resource to be managed by RequestedToCapacityRatio function. - Name apiv1.ResourceName `json:"name,casttype=ResourceName"` + Name string `json:"name"` // Weight of the resource. - Weight int `json:"weight,omitempty"` + Weight int64 `json:"weight,omitempty"` } // ExtenderManagedResource describes the arguments of extended resources // managed by an extender. type ExtenderManagedResource struct { // Name is the extended resource name. - Name apiv1.ResourceName `json:"name,casttype=ResourceName"` + Name string `json:"name"` // IgnoredByScheduler indicates whether kube-scheduler should ignore this // resource when applying predicates. IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"` @@ -187,9 +185,9 @@ type ExtenderTLSConfig struct { CAData []byte `json:"caData,omitempty"` } -// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, // it is assumed that the extender chose not to provide that extension. -type ExtenderConfig struct { +type Extender struct { // URLPrefix at which the extender is available URLPrefix string `json:"urlPrefix"` // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. @@ -200,7 +198,7 @@ type ExtenderConfig struct { PrioritizeVerb string `json:"prioritizeVerb,omitempty"` // The numeric multiplier for the node scores that the prioritize call generates. // The weight should be a positive integer - Weight int `json:"weight,omitempty"` + Weight int64 `json:"weight,omitempty"` // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender // can implement this function. @@ -231,125 +229,14 @@ type ExtenderConfig struct { Ignorable bool `json:"ignorable,omitempty"` } -// caseInsensitiveExtenderConfig is a type alias which lets us use the stdlib case-insensitive decoding +// caseInsensitiveExtender is a type alias which lets us use the stdlib case-insensitive decoding // to preserve compatibility with incorrectly specified scheduler config fields: // * BindVerb, which originally did not specify a json tag, and required upper-case serialization in 1.7 // * TLSConfig, which uses a struct not intended for serialization, and does not include any json tags -type caseInsensitiveExtenderConfig *ExtenderConfig +type caseInsensitiveExtender *Extender // UnmarshalJSON implements the json.Unmarshaller interface. // This preserves compatibility with incorrect case-insensitive configuration fields. -func (t *ExtenderConfig) UnmarshalJSON(b []byte) error { - return gojson.Unmarshal(b, caseInsensitiveExtenderConfig(t)) -} - -// ExtenderArgs represents the arguments needed by the extender to filter/prioritize -// nodes for a pod. 
-type ExtenderArgs struct { - // Pod being scheduled - Pod *apiv1.Pod `json:"pod"` - // List of candidate nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == false - Nodes *apiv1.NodeList `json:"nodes,omitempty"` - // List of candidate node names where the pod can be scheduled; to be - // populated only if ExtenderConfig.NodeCacheCapable == true - NodeNames *[]string `json:"nodenames,omitempty"` -} - -// ExtenderPreemptionResult represents the result returned by preemption phase of extender. -type ExtenderPreemptionResult struct { - NodeNameToMetaVictims map[string]*MetaVictims `json:"nodeNameToMetaVictims,omitempty"` -} - -// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes. -type ExtenderPreemptionArgs struct { - // Pod being scheduled - Pod *apiv1.Pod `json:"pod"` - // Victims map generated by scheduler preemption phase - // Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims. - NodeNameToVictims map[string]*Victims `json:"nodeToVictims,omitempty"` - NodeNameToMetaVictims map[string]*MetaVictims `json:"nodeNameToMetaVictims,omitempty"` -} - -// Victims represents: -// pods: a group of pods expected to be preempted. -// numPDBViolations: the count of violations of PodDisruptionBudget -type Victims struct { - Pods []*apiv1.Pod `json:"pods"` - NumPDBViolations int `json:"numPDBViolations"` -} - -// MetaPod represent identifier for a v1.Pod -type MetaPod struct { - UID string `json:"uid"` -} - -// MetaVictims represents: -// pods: a group of pods expected to be preempted. -// Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way. -// numPDBViolations: the count of violations of PodDisruptionBudget -type MetaVictims struct { - Pods []*MetaPod `json:"pods"` - NumPDBViolations int `json:"numPDBViolations"` -} - -// FailedNodesMap represents the filtered out nodes, with node names and failure messages -type FailedNodesMap map[string]string - -// ExtenderFilterResult represents the results of a filter call to an extender -type ExtenderFilterResult struct { - // Filtered set of nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == false - Nodes *apiv1.NodeList `json:"nodes,omitempty"` - // Filtered set of nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == true - NodeNames *[]string `json:"nodenames,omitempty"` - // Filtered out nodes where the pod can't be scheduled and the failure messages - FailedNodes FailedNodesMap `json:"failedNodes,omitempty"` - // Error message indicating failure - Error string `json:"error,omitempty"` -} - -// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node. -type ExtenderBindingArgs struct { - // PodName is the name of the pod being bound - PodName string - // PodNamespace is the namespace of the pod being bound - PodNamespace string - // PodUID is the UID of the pod being bound - PodUID types.UID - // Node selected by the scheduler - Node string -} - -// ExtenderBindingResult represents the result of binding of a pod to a node from an extender. -type ExtenderBindingResult struct { - // Error message indicating failure - Error string -} - -// HostPriority represents the priority of scheduling to a particular host, higher priority is better. 
-type HostPriority struct { - // Name of the host - Host string `json:"host"` - // Score associated with the host - Score int `json:"score"` -} - -// HostPriorityList declares a []HostPriority type. -type HostPriorityList []HostPriority - -func (h HostPriorityList) Len() int { - return len(h) -} - -func (h HostPriorityList) Less(i, j int) bool { - if h[i].Score == h[j].Score { - return h[i].Host < h[j].Host - } - return h[i].Score < h[j].Score -} - -func (h HostPriorityList) Swap(i, j int) { - h[i], h[j] = h[j], h[i] +func (t *Extender) UnmarshalJSON(b []byte) error { + return gojson.Unmarshal(b, caseInsensitiveExtender(t)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go similarity index 54% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go rename to cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go index c8a8c6fe204..211db388ba2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go @@ -18,82 +18,14 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package api +package v1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) { - *out = *in - if in.Pod != nil { - in, out := &in.Pod, &out.Pod - *out = new(v1.Pod) - (*in).DeepCopyInto(*out) - } - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = new(v1.NodeList) - (*in).DeepCopyInto(*out) - } - if in.NodeNames != nil { - in, out := &in.NodeNames, &out.NodeNames - *out = new([]string) - if **in != nil { - in, out := *in, *out - *out = make([]string, len(*in)) - copy(*out, *in) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs. -func (in *ExtenderArgs) DeepCopy() *ExtenderArgs { - if in == nil { - return nil - } - out := new(ExtenderArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs. -func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs { - if in == nil { - return nil - } - out := new(ExtenderBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult. -func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult { - if in == nil { - return nil - } - out := new(ExtenderBindingResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) { +func (in *Extender) DeepCopyInto(out *Extender) { *out = *in if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig @@ -108,49 +40,12 @@ func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig. -func (in *ExtenderConfig) DeepCopy() *ExtenderConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. +func (in *Extender) DeepCopy() *Extender { if in == nil { return nil } - out := new(ExtenderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = new(v1.NodeList) - (*in).DeepCopyInto(*out) - } - if in.NodeNames != nil { - in, out := &in.NodeNames, &out.NodeNames - *out = new([]string) - if **in != nil { - in, out := *in, *out - *out = make([]string, len(*in)) - copy(*out, *in) - } - } - if in.FailedNodes != nil { - in, out := &in.FailedNodes, &out.FailedNodes - *out = make(FailedNodesMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult. -func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult { - if in == nil { - return nil - } - out := new(ExtenderFilterResult) + out := new(Extender) in.DeepCopyInto(out) return out } @@ -171,88 +66,6 @@ func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) { - *out = *in - if in.Pod != nil { - in, out := &in.Pod, &out.Pod - *out = new(v1.Pod) - (*in).DeepCopyInto(*out) - } - if in.NodeNameToVictims != nil { - in, out := &in.NodeNameToVictims, &out.NodeNameToVictims - *out = make(map[string]*Victims, len(*in)) - for key, val := range *in { - var outVal *Victims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(Victims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - if in.NodeNameToMetaVictims != nil { - in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims - *out = make(map[string]*MetaVictims, len(*in)) - for key, val := range *in { - var outVal *MetaVictims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(MetaVictims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionArgs. -func (in *ExtenderPreemptionArgs) DeepCopy() *ExtenderPreemptionArgs { - if in == nil { - return nil - } - out := new(ExtenderPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult) { - *out = *in - if in.NodeNameToMetaVictims != nil { - in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims - *out = make(map[string]*MetaVictims, len(*in)) - for key, val := range *in { - var outVal *MetaVictims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(MetaVictims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionResult. -func (in *ExtenderPreemptionResult) DeepCopy() *ExtenderPreemptionResult { - if in == nil { - return nil - } - out := new(ExtenderPreemptionResult) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { *out = *in @@ -284,64 +97,6 @@ func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) { - { - in := &in - *out = make(FailedNodesMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap. -func (in FailedNodesMap) DeepCopy() FailedNodesMap { - if in == nil { - return nil - } - out := new(FailedNodesMap) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPriority) DeepCopyInto(out *HostPriority) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority. -func (in *HostPriority) DeepCopy() *HostPriority { - if in == nil { - return nil - } - out := new(HostPriority) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) { - { - in := &in - *out = make(HostPriorityList, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList. -func (in HostPriorityList) DeepCopy() HostPriorityList { - if in == nil { - return nil - } - out := new(HostPriorityList) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LabelPreference) DeepCopyInto(out *LabelPreference) { *out = *in @@ -379,49 +134,6 @@ func (in *LabelsPresence) DeepCopy() *LabelsPresence { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetaPod) DeepCopyInto(out *MetaPod) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaPod. -func (in *MetaPod) DeepCopy() *MetaPod { - if in == nil { - return nil - } - out := new(MetaPod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MetaVictims) DeepCopyInto(out *MetaVictims) { - *out = *in - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = make([]*MetaPod, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(MetaPod) - **out = **in - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaVictims. -func (in *MetaVictims) DeepCopy() *MetaVictims { - if in == nil { - return nil - } - out := new(MetaVictims) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Policy) DeepCopyInto(out *Policy) { *out = *in @@ -440,9 +152,9 @@ func (in *Policy) DeepCopyInto(out *Policy) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ExtenderConfigs != nil { - in, out := &in.ExtenderConfigs, &out.ExtenderConfigs - *out = make([]ExtenderConfig, len(*in)) + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]Extender, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -570,8 +282,8 @@ func (in *PriorityPolicy) DeepCopy() *PriorityPolicy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapacityRatioArguments) { *out = *in - if in.UtilizationShape != nil { - in, out := &in.UtilizationShape, &out.UtilizationShape + if in.Shape != nil { + in, out := &in.Shape, &out.Shape *out = make([]UtilizationShapePoint, len(*in)) copy(*out, *in) } @@ -661,30 +373,3 @@ func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Victims) DeepCopyInto(out *Victims) { - *out = *in - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = make([]*v1.Pod, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.Pod) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Victims. 
-func (in *Victims) DeepCopy() *Victims { - if in == nil { - return nil - } - out := new(Victims) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/BUILD new file mode 100644 index 00000000000..d9ca51ea527 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/kube-scheduler/config/v1alpha1", + importpath = "k8s.io/kube-scheduler/config/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/doc.go new file mode 100644 index 00000000000..73c9b6734e8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io + +package v1alpha1 // import "k8s.io/kube-scheduler/config/v1alpha1" diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/register.go new file mode 100644 index 00000000000..c42f8412fc2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeSchedulerConfiguration{}, + ) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/types.go new file mode 100644 index 00000000000..2d8fdc82280 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/types.go @@ -0,0 +1,223 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +const ( + // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") + SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem + + // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") + SchedulerDefaultLockObjectName = "kube-scheduler" + + // SchedulerDefaultProviderName defines the default provider names + SchedulerDefaultProviderName = "DefaultProvider" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeSchedulerConfiguration configures a scheduler +type KubeSchedulerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // SchedulerName is name of the scheduler, used to select which pods + // will be processed by this scheduler, based on pod's "spec.SchedulerName". + SchedulerName *string `json:"schedulerName,omitempty"` + // AlgorithmSource specifies the scheduler algorithm source. + AlgorithmSource SchedulerAlgorithmSource `json:"algorithmSource"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight *int32 `json:"hardPodAffinitySymmetricWeight,omitempty"` + + // LeaderElection defines the configuration of leader election client. 
+ LeaderElection KubeSchedulerLeaderElectionConfiguration `json:"leaderElection"`
+
+ // ClientConnection specifies the kubeconfig file and client connection
+ // settings for the scheduler to use when communicating with the apiserver.
+ ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"`
+ // HealthzBindAddress is the IP address and port for the health check server to serve on,
+ // defaulting to 0.0.0.0:10251.
+ HealthzBindAddress *string `json:"healthzBindAddress,omitempty"`
+ // MetricsBindAddress is the IP address and port for the metrics server to
+ // serve on, defaulting to 0.0.0.0:10251.
+ MetricsBindAddress *string `json:"metricsBindAddress,omitempty"`
+
+ // DebuggingConfiguration holds configuration for Debugging related features
+ // TODO: We might want to make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration
+ componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"`
+
+ // DisablePreemption disables the pod preemption feature.
+ DisablePreemption *bool `json:"disablePreemption,omitempty"`
+
+ // PercentageOfNodesToScore is the percentage of all nodes that, once found feasible
+ // for running a pod, is enough for the scheduler to stop searching for more feasible nodes in
+ // the cluster. This helps improve the scheduler's performance. The scheduler always tries to find
+ // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
+ // Example: if the cluster size is 500 nodes and the value of this flag is 30,
+ // then the scheduler stops looking for further feasible nodes once it finds 150 feasible ones.
+ // When the value is 0, a default percentage (5%-50%, based on the size of the cluster) of the
+ // nodes will be scored.
+ PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"`
+
+ // Duration to wait for a binding operation to complete before timing out.
+ // The value must be a non-negative integer. The value zero indicates no waiting.
+ // If this value is nil, the default value will be used.
+ BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds"`
+
+ // PodInitialBackoffSeconds is the initial backoff for unschedulable pods.
+ // If specified, it must be greater than 0. If this value is null, the default value (1s)
+ // will be used.
+ PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds"`
+
+ // PodMaxBackoffSeconds is the max backoff for unschedulable pods.
+ // If specified, it must be greater than podInitialBackoffSeconds. If this value is null,
+ // the default value (10s) will be used.
+ PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds"`
+
+ // Plugins specifies the set of plugins that should be enabled or disabled. Enabled plugins are the
+ // ones that should be enabled in addition to the default plugins. Disabled plugins are any of the
+ // default plugins that should be disabled.
+ // When no enabled or disabled plugin is specified for an extension point, default plugins for
+ // that extension point will be used if there are any.
+ Plugins *Plugins `json:"plugins,omitempty"`
+
+ // PluginConfig is an optional set of custom plugin arguments for each plugin.
+ // Omitting config args for a plugin is equivalent to using the default config for that plugin.
+ PluginConfig []PluginConfig `json:"pluginConfig,omitempty"`
+}
+
+// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source
+// field must be specified, and source fields are mutually exclusive.
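For illustration only, here is a minimal, self-contained Go sketch of the arithmetic described in the percentageOfNodesToScore comment above. The numFeasibleNodesToFind helper, the floor constant of 100, and the fallback for out-of-range values are assumptions made for this sketch, not the scheduler's actual implementation.

package main

import "fmt"

// minFeasibleNodes is an assumed floor for this sketch; the field comment above
// only says the scheduler always finds at least "minFeasibleNodesToFind" nodes.
const minFeasibleNodes = 100

// numFeasibleNodesToFind mirrors the example in the field comment: with 500 nodes
// and percentageOfNodesToScore=30, the scheduler stops after 150 feasible nodes.
func numFeasibleNodesToFind(numAllNodes, percentage int32) int32 {
	if percentage <= 0 || percentage >= 100 {
		// Simplification: per the comment, the real scheduler derives an
		// adaptive default (5%-50% of the cluster) when the value is 0.
		return numAllNodes
	}
	n := numAllNodes * percentage / 100
	if n < minFeasibleNodes {
		return minFeasibleNodes
	}
	return n
}

func main() {
	fmt.Println(numFeasibleNodesToFind(500, 30)) // prints 150
}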
+type SchedulerAlgorithmSource struct {
+ // Policy is a policy based algorithm source.
+ Policy *SchedulerPolicySource `json:"policy,omitempty"`
+ // Provider is the name of a scheduling algorithm provider to use.
+ Provider *string `json:"provider,omitempty"`
+}
+
+// SchedulerPolicySource configures a means to obtain a scheduler Policy. One
+// source field must be specified, and source fields are mutually exclusive.
+type SchedulerPolicySource struct {
+ // File is a file policy source.
+ File *SchedulerPolicyFileSource `json:"file,omitempty"`
+ // ConfigMap is a config map policy source.
+ ConfigMap *SchedulerPolicyConfigMapSource `json:"configMap,omitempty"`
+}
+
+// SchedulerPolicyFileSource is a policy serialized to disk and accessed via
+// path.
+type SchedulerPolicyFileSource struct {
+ // Path is the location of a serialized policy.
+ Path string `json:"path"`
+}
+
+// SchedulerPolicyConfigMapSource is a policy serialized into a config map value
+// under the SchedulerPolicyConfigMapKey key.
+type SchedulerPolicyConfigMapSource struct {
+ // Namespace is the namespace of the policy config map.
+ Namespace string `json:"namespace"`
+ // Name is the name of the policy config map.
+ Name string `json:"name"`
+}
+
+// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration
+// to include scheduler specific configuration.
+type KubeSchedulerLeaderElectionConfiguration struct {
+ componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:",inline"`
+ // LockObjectNamespace defines the namespace of the lock object
+ // DEPRECATED: will be removed in favor of resourceNamespace
+ LockObjectNamespace string `json:"lockObjectNamespace"`
+ // LockObjectName defines the lock object name
+ // DEPRECATED: will be removed in favor of resourceName
+ LockObjectName string `json:"lockObjectName"`
+}
+
+// Plugins include multiple extension points. When specified, the list of plugins for
+// a particular extension point are the only ones enabled. If an extension point is
+// omitted from the config, then the default set of plugins is used for that extension point.
+// Enabled plugins are called in the order specified here, after default plugins. If they need to
+// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order.
+type Plugins struct {
+ // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.
+ QueueSort *PluginSet `json:"queueSort,omitempty"`
+
+ // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.
+ PreFilter *PluginSet `json:"preFilter,omitempty"`
+
+ // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.
+ Filter *PluginSet `json:"filter,omitempty"`
+
+ // PostFilter is a list of plugins that are invoked after filtering out infeasible nodes.
+ PostFilter *PluginSet `json:"postFilter,omitempty"`
+
+ // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.
+ Score *PluginSet `json:"score,omitempty"`
+
+ // Reserve is a list of plugins invoked when reserving a node to run the pod.
+ Reserve *PluginSet `json:"reserve,omitempty"`
+
+ // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.
+ Permit *PluginSet `json:"permit,omitempty"`
+
+ // PreBind is a list of plugins that should be invoked before a pod is bound.
+ PreBind *PluginSet `json:"preBind,omitempty"`
+
+ // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework.
+ // The scheduler calls these plugins in order. The scheduler skips the rest of these plugins as soon as one returns success.
+ Bind *PluginSet `json:"bind,omitempty"`
+
+ // PostBind is a list of plugins that should be invoked after a pod is successfully bound.
+ PostBind *PluginSet `json:"postBind,omitempty"`
+
+ // Unreserve is a list of plugins invoked when a pod that was previously reserved is rejected in a later phase.
+ Unreserve *PluginSet `json:"unreserve,omitempty"`
+}
+
+// PluginSet specifies enabled and disabled plugins for an extension point.
+// If an array is empty, missing, or nil, default plugins at that extension point will be used.
+type PluginSet struct {
+ // Enabled specifies plugins that should be enabled in addition to default plugins.
+ // These are called after default plugins and in the same order specified here.
+ Enabled []Plugin `json:"enabled,omitempty"`
+ // Disabled specifies default plugins that should be disabled.
+ // When all default plugins need to be disabled, an array containing only one "*" should be provided.
+ Disabled []Plugin `json:"disabled,omitempty"`
+}
+
+// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins.
+type Plugin struct {
+ // Name defines the name of the plugin
+ Name string `json:"name"`
+ // Weight defines the weight of the plugin, only used for Score plugins.
+ Weight *int32 `json:"weight,omitempty"`
+}
+
+// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization.
+// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure.
+// It is up to the plugin to process these Args.
+type PluginConfig struct {
+ // Name defines the name of the plugin being configured
+ Name string `json:"name"`
+ // Args defines the arguments passed to the plugin at the time of initialization. Args can have arbitrary structure.
+ Args runtime.Unknown `json:"args,omitempty"`
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..f81f35e5802
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,351 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
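As a rough usage sketch of the Plugins, PluginSet and Plugin types defined above (not part of this patch), the snippet below enables one custom Score plugin after the defaults and disables a default one. The plugin names are hypothetical; only the import path and field shapes come from the API added here.

package main

import (
	"fmt"

	kubeschedulerv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
	"k8s.io/utils/pointer"
)

func main() {
	// Plugin names below are hypothetical and only illustrate the shape of the API.
	plugins := &kubeschedulerv1alpha1.Plugins{
		Score: &kubeschedulerv1alpha1.PluginSet{
			// Enabled plugins run after the default Score plugins, in this order.
			Enabled: []kubeschedulerv1alpha1.Plugin{
				{Name: "ExampleScorePlugin", Weight: pointer.Int32Ptr(5)},
			},
			// Disabled lists default plugins to turn off; a single "*" entry disables all of them.
			Disabled: []kubeschedulerv1alpha1.Plugin{
				{Name: "SomeDefaultScorePlugin"},
			},
		},
	}
	fmt.Println(len(plugins.Score.Enabled), len(plugins.Score.Disabled))
}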
+func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + in.AlgorithmSource.DeepCopyInto(&out.AlgorithmSource) + if in.HardPodAffinitySymmetricWeight != nil { + in, out := &in.HardPodAffinitySymmetricWeight, &out.HardPodAffinitySymmetricWeight + *out = new(int32) + **out = **in + } + in.LeaderElection.DeepCopyInto(&out.LeaderElection) + out.ClientConnection = in.ClientConnection + if in.HealthzBindAddress != nil { + in, out := &in.HealthzBindAddress, &out.HealthzBindAddress + *out = new(string) + **out = **in + } + if in.MetricsBindAddress != nil { + in, out := &in.MetricsBindAddress, &out.MetricsBindAddress + *out = new(string) + **out = **in + } + in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) + if in.DisablePreemption != nil { + in, out := &in.DisablePreemption, &out.DisablePreemption + *out = new(bool) + **out = **in + } + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } + if in.BindTimeoutSeconds != nil { + in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.PodInitialBackoffSeconds != nil { + in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds + *out = new(int64) + **out = **in + } + if in.PodMaxBackoffSeconds != nil { + in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds + *out = new(int64) + **out = **in + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(Plugins) + (*in).DeepCopyInto(*out) + } + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make([]PluginConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. +func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopyInto(out *KubeSchedulerLeaderElectionConfiguration) { + *out = *in + in.LeaderElectionConfiguration.DeepCopyInto(&out.LeaderElectionConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerLeaderElectionConfiguration. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerLeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Plugin) DeepCopyInto(out *Plugin) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. +func (in *Plugin) DeepCopy() *Plugin { + if in == nil { + return nil + } + out := new(Plugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { + *out = *in + in.Args.DeepCopyInto(&out.Args) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. +func (in *PluginConfig) DeepCopy() *PluginConfig { + if in == nil { + return nil + } + out := new(PluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginSet) DeepCopyInto(out *PluginSet) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. +func (in *PluginSet) DeepCopy() *PluginSet { + if in == nil { + return nil + } + out := new(PluginSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugins) DeepCopyInto(out *Plugins) { + *out = *in + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PostFilter != nil { + in, out := &in.PostFilter, &out.PostFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Unreserve != nil { + in, out := &in.Unreserve, &out.Unreserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. +func (in *Plugins) DeepCopy() *Plugins { + if in == nil { + return nil + } + out := new(Plugins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulerAlgorithmSource) DeepCopyInto(out *SchedulerAlgorithmSource) {
+ *out = *in
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(SchedulerPolicySource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerAlgorithmSource.
+func (in *SchedulerAlgorithmSource) DeepCopy() *SchedulerAlgorithmSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerAlgorithmSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerPolicyConfigMapSource) DeepCopyInto(out *SchedulerPolicyConfigMapSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyConfigMapSource.
+func (in *SchedulerPolicyConfigMapSource) DeepCopy() *SchedulerPolicyConfigMapSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerPolicyConfigMapSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerPolicyFileSource) DeepCopyInto(out *SchedulerPolicyFileSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyFileSource.
+func (in *SchedulerPolicyFileSource) DeepCopy() *SchedulerPolicyFileSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerPolicyFileSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerPolicySource) DeepCopyInto(out *SchedulerPolicySource) {
+ *out = *in
+ if in.File != nil {
+ in, out := &in.File, &out.File
+ *out = new(SchedulerPolicyFileSource)
+ **out = **in
+ }
+ if in.ConfigMap != nil {
+ in, out := &in.ConfigMap, &out.ConfigMap
+ *out = new(SchedulerPolicyConfigMapSource)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicySource.
+func (in *SchedulerPolicySource) DeepCopy() *SchedulerPolicySource {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerPolicySource)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
index b2aeb87cc7a..1c03d43162f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
@@ -710,6 +710,10 @@ type KubeletConfiguration struct {
 // Default: nil
 // +optional
 KubeReserved map[string]string `json:"kubeReserved,omitempty"`
+ // This ReservedSystemCPUs option specifies the CPU list reserved for host-level system threads and kubernetes-related threads.
+ // This provides a "static" CPU list rather than the "dynamic" list computed from system-reserved and kube-reserved.
+ // This option overwrites CPUs provided by system-reserved and kube-reserved.
+ ReservedSystemCPUs string `json:"reservedSystemCPUs,omitempty"`
 // This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons.
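The reservedSystemCPUs field added above takes a cpuset-style value such as "0-1,4" (a comma-separated list of CPUs or CPU ranges, matching the --reserved-cpus flag help later in this patch). Below is a minimal sketch of that list format; it is an illustration only, not the kubelet's actual cpuset parser, and parseReservedCPUs is a name invented for this sketch.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseReservedCPUs expands a comma-separated list of CPUs or CPU ranges,
// e.g. "0-1,4" -> [0 1 4]. Error handling is deliberately minimal.
func parseReservedCPUs(s string) ([]int, error) {
	var cpus []int
	for _, part := range strings.Split(s, ",") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		bounds := strings.SplitN(part, "-", 2)
		lo, err := strconv.Atoi(bounds[0])
		if err != nil {
			return nil, err
		}
		hi := lo
		if len(bounds) == 2 {
			if hi, err = strconv.Atoi(bounds[1]); err != nil {
				return nil, err
			}
		}
		for cpu := lo; cpu <= hi; cpu++ {
			cpus = append(cpus, cpu)
		}
	}
	return cpus, nil
}

func main() {
	cpus, err := parseReservedCPUs("0-1,4")
	if err != nil {
		panic(err)
	}
	fmt.Println(cpus) // [0 1 4]
}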
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. // Dynamic Kubelet Config (beta): This field should not be updated without a full node diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/BUILD similarity index 85% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/BUILD index a6c83daeef5..85b6411943e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/BUILD @@ -11,7 +11,8 @@ go_library( "api.pb.go", "constants.go", ], - importpath = "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1", + importmap = "k8s.io/kubernetes/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1", + importpath = "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go index a3e4847e6ec..bef0086cc23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go @@ -344,7 +344,7 @@ func (m *NUMANode) GetID() int64 { // E.g: // struct Device { // ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", -// State: "Healthy", +// Health: "Healthy", // Topology: // Node: //ID: 1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.proto rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto index 94f2de89f37..07d94aebd05 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto @@ -84,7 +84,7 @@ message NUMANode { /* E.g: * struct Device { * ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", -* State: "Healthy", +* Health: "Healthy", * Topology: * Node: ID: 1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/constants.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/constants.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/constants.go rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/constants.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/BUILD similarity index 82% rename from 
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/BUILD index 644c05e1be2..0ea7e6ec74b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/BUILD @@ -11,7 +11,8 @@ go_library( "api.pb.go", "constants.go", ], - importpath = "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1", + importmap = "k8s.io/kubernetes/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1", + importpath = "k8s.io/kubelet/pkg/apis/pluginregistration/v1", deps = [ "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.pb.go rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.proto rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/constants.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/constants.go rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD index f81a1dee004..bbabff37d6e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/proxy/apis:go_default_library", "//pkg/proxy/apis/config:go_default_library", "//pkg/proxy/apis/config/scheme:go_default_library", + "//pkg/proxy/apis/config/v1alpha1:go_default_library", "//pkg/proxy/apis/config/validation:go_default_library", "//pkg/proxy/config:go_default_library", "//pkg/proxy/healthcheck:go_default_library", @@ -39,15 +40,14 @@ go_library( "//pkg/util/ipset:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/ipvs:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/version:go_default_library", - "//pkg/version/verflag:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -65,17 +65,20 @@ go_library( "//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//staging/src/k8s.io/component-base/config:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", + "//staging/src/k8s.io/component-base/version/verflag:go_default_library", "//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -83,7 +86,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -91,7 +93,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:dragonfly": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -99,7 +100,13 @@ go_library( ], "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/proxy/metrics:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -107,7 +114,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -115,7 +121,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -123,7 +128,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:netbsd": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", 
"//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -131,7 +135,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:openbsd": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -139,7 +142,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:plan9": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -147,7 +149,6 @@ go_library( ], "@io_bazel_rules_go//go/platform:solaris": [ "//pkg/proxy/metrics:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -194,6 +195,9 @@ go_test( "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/proxy/ipvs:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/proxy/ipvs:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/proxy/ipvs:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go index 941620643d1..74729bfa517 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go @@ -23,8 +23,8 @@ import ( "strings" "k8s.io/klog" + "k8s.io/utils/mount" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/sysctl" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index e41a493d77c..51131702a2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -32,10 +32,13 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + gerrors "github.com/pkg/errors" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/selection" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -53,6 +56,8 @@ import ( cliflag "k8s.io/component-base/cli/flag" componentbaseconfig "k8s.io/component-base/config" "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/component-base/version" + "k8s.io/component-base/version/verflag" "k8s.io/klog" "k8s.io/kube-proxy/config/v1alpha1" api "k8s.io/kubernetes/pkg/apis/core" @@ -63,6 +68,7 @@ import ( "k8s.io/kubernetes/pkg/proxy/apis" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1" "k8s.io/kubernetes/pkg/proxy/apis/config/validation" "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -77,8 +83,6 @@ import ( 
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" "k8s.io/kubernetes/pkg/util/oom" - "k8s.io/kubernetes/pkg/version" - "k8s.io/kubernetes/pkg/version/verflag" "k8s.io/utils/exec" utilpointer "k8s.io/utils/pointer" ) @@ -153,6 +157,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") fs.BoolVar(&o.CleanupIPVS, "cleanup-ipvs", o.CleanupIPVS, "If true and --cleanup is specified, kube-proxy will also flush IPVS rules, in addition to normal cleanup.") + fs.MarkDeprecated("cleanup-ipvs", "In a future release, running --cleanup will always flush IPVS rules") fs.Var(utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Var(utilflag.IPVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") @@ -252,6 +257,7 @@ func (o *Options) eventHandler(ent fsnotify.Event) { if eventOpIs(fsnotify.Write) || eventOpIs(fsnotify.Rename) { // error out when ConfigFile is updated o.errCh <- fmt.Errorf("content of the proxy server's configuration file was updated") + return } o.errCh <- nil } @@ -368,6 +374,20 @@ func addressFromDeprecatedFlags(addr string, port int32) string { return proxyutil.AppendPortIfNeeded(addr, port) } +// newLenientSchemeAndCodecs returns a scheme that has only v1alpha1 registered into +// it and a CodecFactory with strict decoding disabled. +func newLenientSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) { + lenientScheme := runtime.NewScheme() + if err := kubeproxyconfig.AddToScheme(lenientScheme); err != nil { + return nil, nil, fmt.Errorf("failed to add kube-proxy config API to lenient scheme: %v", err) + } + if err := kubeproxyconfigv1alpha1.AddToScheme(lenientScheme); err != nil { + return nil, nil, fmt.Errorf("failed to add kube-proxy config v1alpha1 API to lenient scheme: %v", err) + } + lenientCodecs := serializer.NewCodecFactory(lenientScheme, serializer.DisableStrict) + return lenientScheme, &lenientCodecs, nil +} + // loadConfigFromFile loads the contents of file and decodes it as a // KubeProxyConfiguration object. func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyConfiguration, error) { @@ -379,12 +399,34 @@ func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyCon return o.loadConfig(data) } -// loadConfig decodes data as a KubeProxyConfiguration object. +// loadConfig decodes a serialized KubeProxyConfiguration to the internal type. func (o *Options) loadConfig(data []byte) (*kubeproxyconfig.KubeProxyConfiguration, error) { + configObj, gvk, err := proxyconfigscheme.Codecs.UniversalDecoder().Decode(data, nil, nil) if err != nil { - return nil, err + // Try strict decoding first. If that fails decode with a lenient + // decoder, which has only v1alpha1 registered, and log a warning. + // The lenient path is to be dropped when support for v1alpha1 is dropped. 
+ if !runtime.IsStrictDecodingError(err) { + return nil, gerrors.Wrap(err, "failed to decode") + } + + _, lenientCodecs, lenientErr := newLenientSchemeAndCodecs() + if lenientErr != nil { + return nil, lenientErr + } + + configObj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil) + if lenientErr != nil { + // Lenient decoding failed with the current version, return the + // original strict error. + return nil, fmt.Errorf("failed lenient decoding: %v", err) + } + + // Continue with the v1alpha1 object that was decoded leniently, but emit a warning. + klog.Warningf("using lenient decoding as strict decoding failed: %v", err) } + proxyConfig, ok := configObj.(*kubeproxyconfig.KubeProxyConfiguration) if !ok { return nil, fmt.Errorf("got unexpected config type: %v", gvk) @@ -481,7 +523,7 @@ type ProxyServer struct { UseEndpointSlices bool OOMScoreAdj *int32 ConfigSyncPeriod time.Duration - HealthzServer *healthcheck.HealthzServer + HealthzServer *healthcheck.ProxierHealthServer } // createClients creates a kube client and an event client from the given config and masterOverride. @@ -551,6 +593,8 @@ func (s *ProxyServer) Run() error { proxyMux := mux.NewPathRecorderMux("kube-proxy") healthz.InstallHandler(proxyMux) proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") fmt.Fprintf(w, "%s", s.ProxyMode) }) proxyMux.Handle("/metrics", legacyregistry.Handler()) @@ -620,6 +664,7 @@ func (s *ProxyServer) Run() error { labelSelector := labels.NewSelector() labelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints) + // Make informers that filter out objects that want a non-default service proxy. informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod, informers.WithTweakListOptions(func(options *metav1.ListOptions) { options.LabelSelector = labelSelector.String() @@ -634,7 +679,7 @@ func (s *ProxyServer) Run() error { go serviceConfig.Run(wait.NeverStop) if utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) { - endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1alpha1().EndpointSlices(), s.ConfigSyncPeriod) + endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1beta1().EndpointSlices(), s.ConfigSyncPeriod) endpointSliceConfig.RegisterEventHandler(s.Proxier) go endpointSliceConfig.Run(wait.NeverStop) } else { @@ -647,6 +692,21 @@ func (s *ProxyServer) Run() error { // functions must configure their shared informer event handlers first. informerFactory.Start(wait.NeverStop) + if utilfeature.DefaultFeatureGate.Enabled(features.ServiceTopology) { + // Make an informer that selects for our nodename. + currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod, + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String() + })) + nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.ConfigSyncPeriod) + nodeConfig.RegisterEventHandler(s.Proxier) + go nodeConfig.Run(wait.NeverStop) + + // This has to start after the calls to NewNodeConfig because that must + // configure the shared informer event handler first. 
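A self-contained sketch of how the lenient fallback introduced in loadConfig above can be exercised: build a scheme with only the kube-proxy config APIs, wrap it in a CodecFactory with strict decoding disabled (the same serializer.DisableStrict option used by newLenientSchemeAndCodecs), and decode the bytes again. The decodeLeniently helper, the example manifest, and its unknown field are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
	kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
)

// decodeLeniently mirrors the fallback path wired up above: register only the
// kube-proxy config APIs and decode with strict field checking disabled.
func decodeLeniently(data []byte) (runtime.Object, error) {
	scheme := runtime.NewScheme()
	if err := kubeproxyconfig.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := kubeproxyconfigv1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	codecs := serializer.NewCodecFactory(scheme, serializer.DisableStrict)
	obj, _, err := codecs.UniversalDecoder().Decode(data, nil, nil)
	return obj, err
}

func main() {
	// Hypothetical config with an extra field that strict decoding would reject.
	data := []byte("apiVersion: kubeproxy.config.k8s.io/v1alpha1\nkind: KubeProxyConfiguration\nmadeUpField: true\n")
	obj, err := decodeLeniently(data)
	fmt.Println(obj != nil, err)
}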
+ currentNodeInformerFactory.Start(wait.NeverStop) + } + // Birth Cry after the birth is successful s.birthCry() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go index e233d42e7a9..9c5cb19f236 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go @@ -26,7 +26,7 @@ import ( "net" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -42,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/proxy/metrics" "k8s.io/kubernetes/pkg/proxy/userspace" "k8s.io/kubernetes/pkg/util/configz" - utildbus "k8s.io/kubernetes/pkg/util/dbus" utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" @@ -84,13 +83,11 @@ func newProxyServer( var ipvsInterface utilipvs.Interface var kernelHandler ipvs.KernelHandler var ipsetInterface utilipset.Interface - var dbus utildbus.Interface // Create a iptables utils. execer := exec.New() - dbus = utildbus.New() - iptInterface = utiliptables.New(execer, dbus, protocol) + iptInterface = utiliptables.New(execer, protocol) kernelHandler = ipvs.NewLinuxKernelHandler() ipsetInterface = utilipset.New(execer) canUseIPVS, _ := ipvs.CanUseIPVSProxier(kernelHandler, ipsetInterface) @@ -128,11 +125,9 @@ func newProxyServer( Namespace: "", } - var healthzServer *healthcheck.HealthzServer - var healthzUpdater healthcheck.HealthzUpdater + var healthzServer *healthcheck.ProxierHealthServer if len(config.HealthzBindAddress) > 0 { - healthzServer = healthcheck.NewDefaultHealthzServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef) - healthzUpdater = healthzServer + healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef) } var proxier proxy.Provider @@ -142,7 +137,8 @@ func newProxyServer( if nodeIP.IsUnspecified() { nodeIP = utilnode.GetNodeIP(client, hostname) if nodeIP == nil { - return nil, fmt.Errorf("unable to get node IP for hostname %s", hostname) + klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag") + nodeIP = net.ParseIP("127.0.0.1") } } if proxyMode == proxyModeIPTables { @@ -165,7 +161,7 @@ func newProxyServer( hostname, nodeIP, recorder, - healthzUpdater, + healthzServer, config.NodePortAddresses, ) if err != nil { @@ -181,10 +177,10 @@ func newProxyServer( var ipt [2]utiliptables.Interface if iptInterface.IsIpv6() { ipt[1] = iptInterface - ipt[0] = utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4) + ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIpv4) } else { ipt[0] = iptInterface - ipt[1] = utiliptables.New(execer, dbus, utiliptables.ProtocolIpv6) + ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIpv6) } proxier, err = ipvs.NewDualStackProxier( @@ -253,8 +249,6 @@ func newProxyServer( } } - iptInterface.AddReloadFunc(proxier.Sync) - return &ProxyServer{ Client: client, EventClient: eventClient, @@ -326,7 +320,7 @@ func getProxyMode(proxyMode string, khandle ipvs.KernelHandler, ipsetver ipvs.IP case proxyModeIPVS: return tryIPVSProxy(khandle, ipsetver, kcompat) } - klog.Warningf("Flag proxy-mode=%q unknown, 
assuming iptables proxy", proxyMode) + klog.Warningf("Unknown proxy mode %q, assuming iptables proxy", proxyMode) return tryIPTablesProxy(kcompat) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go index bef3f3be017..04b0993973c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go @@ -87,11 +87,9 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi Namespace: "", } - var healthzServer *healthcheck.HealthzServer - var healthzUpdater healthcheck.HealthzUpdater + var healthzServer *healthcheck.ProxierHealthServer if len(config.HealthzBindAddress) > 0 { - healthzServer = healthcheck.NewDefaultHealthzServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef) - healthzUpdater = healthzServer + healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef) } var proxier proxy.Provider @@ -108,7 +106,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi hostname, utilnode.GetNodeIP(client, hostname), recorder, - healthzUpdater, + healthzServer, config.Winkernel, ) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD index fc364aa548f..2bb87eeea7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD @@ -14,15 +14,14 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//pkg/apis/certificates/v1beta1:go_default_library", + "//pkg/controller/certificates/authority:go_default_library", "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/cloudflare/cfssl/config:go_default_library", - "//vendor/github.com/cloudflare/cfssl/signer:go_default_library", - "//vendor/github.com/cloudflare/cfssl/signer/local:go_default_library", ], ) @@ -58,6 +57,7 @@ go_library( "//pkg/kubelet/certificate:go_default_library", "//pkg/kubelet/certificate/bootstrap:go_default_library", "//pkg/kubelet/cm:go_default_library", + "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim:go_default_library", @@ -74,12 +74,9 @@ go_library( "//pkg/util/filesystem:go_default_library", "//pkg/util/flag:go_default_library", "//pkg/util/flock:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/rlimit:go_default_library", - "//pkg/version:go_default_library", - "//pkg/version/verflag:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", @@ -88,6 +85,7 @@ go_library( "//pkg/volume/cinder:go_default_library", "//pkg/volume/configmap:go_default_library", 
"//pkg/volume/csi:go_default_library", + "//pkg/volume/csimigration:go_default_library", "//pkg/volume/downwardapi:go_default_library", "//pkg/volume/emptydir:go_default_library", "//pkg/volume/fc:go_default_library", @@ -123,11 +121,12 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", @@ -139,13 +138,21 @@ go_library( "//staging/src/k8s.io/client-go/util/keyutil:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library", + "//staging/src/k8s.io/component-base/featuregate:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", + "//staging/src/k8s.io/component-base/version/verflag:go_default_library", + "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/inotify:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/utils/inotify:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/auth.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/auth.go index 64109da3f97..22a0285d8b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/auth.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/auth.go @@ -26,9 +26,10 @@ import ( "k8s.io/apiserver/pkg/authentication/authenticatorfactory" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" + "k8s.io/apiserver/pkg/server/dynamiccertificates" clientset "k8s.io/client-go/kubernetes" - authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/server" @@ -42,8 +43,8 @@ func 
BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubel sarClient authorizationclient.SubjectAccessReviewInterface ) if client != nil && !reflect.ValueOf(client).IsNil() { - tokenClient = client.AuthenticationV1beta1().TokenReviews() - sarClient = client.AuthorizationV1beta1().SubjectAccessReviews() + tokenClient = client.AuthenticationV1().TokenReviews() + sarClient = client.AuthorizationV1().SubjectAccessReviews() } authenticator, err := BuildAuthn(tokenClient, config.Authentication) @@ -63,10 +64,19 @@ func BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubel // BuildAuthn creates an authenticator compatible with the kubelet's needs func BuildAuthn(client authenticationclient.TokenReviewInterface, authn kubeletconfig.KubeletAuthentication) (authenticator.Request, error) { + var clientCertificateCAContentProvider authenticatorfactory.CAContentProvider + var err error + if len(authn.X509.ClientCAFile) > 0 { + clientCertificateCAContentProvider, err = dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", authn.X509.ClientCAFile) + if err != nil { + return nil, err + } + } + authenticatorConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ - Anonymous: authn.Anonymous.Enabled, - CacheTTL: authn.Webhook.CacheTTL.Duration, - ClientCAFile: authn.X509.ClientCAFile, + Anonymous: authn.Anonymous.Enabled, + CacheTTL: authn.Webhook.CacheTTL.Duration, + ClientCertificateCAContentProvider: clientCertificateCAContentProvider, } if authn.Webhook.Enabled { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD index e76c2e10898..d50c6192bbf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD @@ -32,16 +32,25 @@ go_library( "//pkg/master/ports:go_default_library", "//pkg/util/flag:go_default_library", "//pkg/util/taints:go_default_library", - "//pkg/version/verflag:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", + "//staging/src/k8s.io/component-base/version/verflag:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/google/cadvisor/container/common:go_default_library", + "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", + "//vendor/github.com/google/cadvisor/container/docker:go_default_library", + "//vendor/github.com/google/cadvisor/container/raw:go_default_library", + "//vendor/github.com/google/cadvisor/machine:go_default_library", + "//vendor/github.com/google/cadvisor/manager:go_default_library", + "//vendor/github.com/google/cadvisor/storage:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/google/cadvisor/container/common:go_default_library", "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go index e8853e92701..281491384f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go @@ -26,8 +26,8 @@ import ( // libs that provide registration functions "k8s.io/component-base/logs" + "k8s.io/component-base/version/verflag" "k8s.io/klog" - "k8s.io/kubernetes/pkg/version/verflag" // ensure libs have a chance to globally register their flags _ "k8s.io/kubernetes/pkg/credentialprovider/azure" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go index 1b60f2cea45..7401c510deb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go @@ -416,7 +416,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.") fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version") fs.BoolVar(&f.EnableCAdvisorJSONEndpoints, "enable-cadvisor-json-endpoints", f.EnableCAdvisorJSONEndpoints, "Enable cAdvisor json /spec and /stats/* endpoints.") - fs.MarkDeprecated("enable-cadvisor-json-apis", "will be removed in a future version") + fs.MarkDeprecated("enable-cadvisor-json-endpoints", "will be removed in a future version") } @@ -553,7 +553,7 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.Var(cliflag.NewMapStringString(&c.EvictionMinimumReclaim), "eviction-minimum-reclaim", "A set of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.") fs.Int32Var(&c.PodsPerCore, "pods-per-core", c.PodsPerCore, "Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.") fs.BoolVar(&c.ProtectKernelDefaults, "protect-kernel-defaults", c.ProtectKernelDefaults, "Default kubelet behaviour for kernel tuning. If set, kubelet errors if any of kernel tunables is different than kubelet defaults.") - + fs.StringVar(&c.ReservedSystemCPUs, "reserved-cpus", c.ReservedSystemCPUs, "A comma-separated list of CPUs or CPU ranges that are reserved for system and kubernetes usage. This specific list will supersede cpu counts in --system-reserved and --kube-reserved.") // Node Allocatable Flags fs.Var(cliflag.NewMapStringString(&c.SystemReserved), "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") fs.Var(cliflag.NewMapStringString(&c.KubeReserved), "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for kubernetes system components. 
Currently cpu, memory and local ephemeral storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go index 4ddf1d65f51..af9f708dc52 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go @@ -22,6 +22,8 @@ import ( _ "k8s.io/kubernetes/pkg/credentialprovider/aws" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" + + "k8s.io/component-base/featuregate" "k8s.io/utils/exec" // Volume plugins @@ -53,7 +55,7 @@ import ( ) // ProbeVolumePlugins collects all volume plugins into an easy to use list. -func ProbeVolumePlugins() []volume.VolumePlugin { +func ProbeVolumePlugins(featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) { allPlugins := []volume.VolumePlugin{} // The list of plugins to probe is decided by the kubelet binary, not @@ -62,7 +64,11 @@ func ProbeVolumePlugins() []volume.VolumePlugin { // // Kubelet does not currently need to configure volume plugins. // If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig - allPlugins = appendLegacyProviderVolumes(allPlugins) + var err error + allPlugins, err = appendLegacyProviderVolumes(allPlugins, featureGate) + if err != nil { + return allPlugins, err + } allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...) allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...) allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(volume.VolumeConfig{})...) @@ -83,7 +89,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin { allPlugins = append(allPlugins, local.ProbeVolumePlugins()...) allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...) - return allPlugins + return allPlugins, nil } // GetDynamicPluginProber gets the probers of dynamically discoverable plugins diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providerless.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providerless.go index b73ae5a5d53..dc1bd3c07a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providerless.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providerless.go @@ -18,9 +18,13 @@ limitations under the License. package app -import "k8s.io/kubernetes/pkg/volume" +import ( + "k8s.io/component-base/featuregate" -func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin) []volume.VolumePlugin { + "k8s.io/kubernetes/pkg/volume" +) + +func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) { // no-op when we didn't compile in support for these - return allPlugins + return allPlugins, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providers.go index e40b11a761e..d54706bf1bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins_providers.go @@ -19,21 +19,61 @@ limitations under the License. 
package app import ( + "k8s.io/component-base/featuregate" + "k8s.io/csi-translation-lib/plugins" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/azure_file" "k8s.io/kubernetes/pkg/volume/cinder" + "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/gcepd" "k8s.io/kubernetes/pkg/volume/vsphere_volume" ) -func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin) []volume.VolumePlugin { - allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, azure_file.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...) +type probeFn func() []volume.VolumePlugin + +func appendPluginBasedOnMigrationFeatureFlags(plugins []volume.VolumePlugin, inTreePluginName string, featureGate featuregate.FeatureGate, pluginMigration, pluginMigrationComplete featuregate.Feature, fn probeFn) ([]volume.VolumePlugin, error) { + // Skip appending the in-tree plugin to the list of plugins to be probed/initialized + // if the CSIMigration feature flag and the plugin-specific feature flag indicating + // CSI migration is complete are both enabled + err := csimigration.CheckMigrationFeatureFlags(featureGate, pluginMigration, pluginMigrationComplete) + if err != nil { + klog.Warningf("Unexpected CSI Migration Feature Flags combination detected: %v. CSI Migration may not take effect", err) + // TODO: fail and return here once alpha only tests can set the feature flags for a plugin correctly + } + if featureGate.Enabled(features.CSIMigration) && featureGate.Enabled(pluginMigration) && featureGate.Enabled(pluginMigrationComplete) { + klog.Infof("Skip registration of plugin %s since feature flag %v is enabled", inTreePluginName, pluginMigrationComplete) + return plugins, nil + } + plugins = append(plugins, fn()...) 
+ return plugins, nil +} + +type pluginInfo struct { + pluginMigrationFeature featuregate.Feature + pluginMigrationCompleteFeature featuregate.Feature + pluginProbeFunction probeFn +} + +func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) { + pluginMigrationStatus := make(map[string]pluginInfo) + pluginMigrationStatus[plugins.AWSEBSInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAWS, pluginMigrationCompleteFeature: features.CSIMigrationAWSComplete, pluginProbeFunction: awsebs.ProbeVolumePlugins} + pluginMigrationStatus[plugins.GCEPDInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationGCE, pluginMigrationCompleteFeature: features.CSIMigrationGCEComplete, pluginProbeFunction: gcepd.ProbeVolumePlugins} + pluginMigrationStatus[plugins.CinderInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationOpenStack, pluginMigrationCompleteFeature: features.CSIMigrationOpenStackComplete, pluginProbeFunction: cinder.ProbeVolumePlugins} + pluginMigrationStatus[plugins.AzureDiskInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureDisk, pluginMigrationCompleteFeature: features.CSIMigrationAzureDiskComplete, pluginProbeFunction: azure_dd.ProbeVolumePlugins} + pluginMigrationStatus[plugins.AzureFileInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureFile, pluginMigrationCompleteFeature: features.CSIMigrationAzureFileComplete, pluginProbeFunction: azure_file.ProbeVolumePlugins} + + var err error + for pluginName, pluginInfo := range pluginMigrationStatus { + allPlugins, err = appendPluginBasedOnMigrationFeatureFlags(allPlugins, pluginName, featureGate, pluginInfo.pluginMigrationFeature, pluginInfo.pluginMigrationCompleteFeature, pluginInfo.pluginProbeFunction) + if err != nil { + return allPlugins, err + } + } + allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) 
- return allPlugins + return allPlugins, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index e497bdcd838..8a705bb139a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -36,6 +36,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/klog" + "k8s.io/utils/mount" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -60,6 +61,9 @@ import ( "k8s.io/client-go/util/keyutil" cloudprovider "k8s.io/cloud-provider" cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/featuregate" + "k8s.io/component-base/version" + "k8s.io/component-base/version/verflag" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -75,6 +79,7 @@ import ( kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap" "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim" @@ -91,12 +96,9 @@ import ( utilfs "k8s.io/kubernetes/pkg/util/filesystem" utilflag "k8s.io/kubernetes/pkg/util/flag" "k8s.io/kubernetes/pkg/util/flock" - "k8s.io/kubernetes/pkg/util/mount" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/rlimit" - "k8s.io/kubernetes/pkg/version" - "k8s.io/kubernetes/pkg/version/verflag" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/subpath" "k8s.io/utils/exec" @@ -246,7 +248,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API } // use kubeletServer to construct the default KubeletDeps - kubeletDeps, err := UnsecuredDependencies(kubeletServer) + kubeletDeps, err := UnsecuredDependencies(kubeletServer, utilfeature.DefaultFeatureGate) if err != nil { klog.Fatal(err) } @@ -267,7 +269,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // run the kubelet klog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) - if err := Run(kubeletServer, kubeletDeps, stopCh); err != nil { + if err := Run(kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate, stopCh); err != nil { klog.Fatal(err) } }, @@ -361,7 +363,7 @@ func loadConfigFile(name string) (*kubeletconfiginternal.KubeletConfiguration, e // UnsecuredDependencies returns a Dependencies suitable for being run, or an error if the server setup // is not valid. 
It will not start any background processes, and does not include authentication/authorization -func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, error) { +func UnsecuredDependencies(s *options.KubeletServer, featureGate featuregate.FeatureGate) (*kubelet.Dependencies, error) { // Initialize the TLS Options tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration) if err != nil { @@ -382,6 +384,10 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err } } + plugins, err := ProbeVolumePlugins(featureGate) + if err != nil { + return nil, err + } return &kubelet.Dependencies{ Auth: nil, // default does not enforce auth[nz] CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here @@ -396,7 +402,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err Subpather: subpather, OOMAdjuster: oom.NewOOMAdjuster(), OSInterface: kubecontainer.RealOS{}, - VolumePlugins: ProbeVolumePlugins(), + VolumePlugins: plugins, DynamicPluginProber: GetDynamicPluginProber(s.VolumePluginDir, pluginRunner), TLSOptions: tlsOptions}, nil } @@ -405,13 +411,13 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err // The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer. // Otherwise, the caller is assumed to have set up the Dependencies object and a default one will // not be generated. -func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) error { +func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate featuregate.FeatureGate, stopCh <-chan struct{}) error { // To help debugging, immediately log version klog.Infof("Version: %+v", version.Get()) if err := initForOS(s.KubeletFlags.WindowsService); err != nil { return fmt.Errorf("failed OS init: %v", err) } - if err := run(s, kubeDeps, stopCh); err != nil { + if err := run(s, kubeDeps, featureGate, stopCh); err != nil { return fmt.Errorf("failed to run Kubelet: %v", err) } return nil @@ -468,7 +474,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) } } -func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) (err error) { +func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate featuregate.FeatureGate, stopCh <-chan struct{}) (err error) { // Set global feature gates based on the value on the initial KubeletServer err = utilfeature.DefaultMutableFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates) if err != nil { @@ -510,7 +516,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } if kubeDeps == nil { - kubeDeps, err = UnsecuredDependencies(s) + kubeDeps, err = UnsecuredDependencies(s, featureGate) if err != nil { return err } @@ -575,13 +581,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan // make a separate client for heartbeat with throttling disabled and a timeout attached heartbeatClientConfig := *clientConfig heartbeatClientConfig.Timeout = s.KubeletConfiguration.NodeStatusUpdateFrequency.Duration - // if the NodeLease feature is enabled, the timeout is the minimum of the lease duration and status update frequency - if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) { - leaseTimeout := time.Duration(s.KubeletConfiguration.NodeLeaseDurationSeconds) * time.Second - if 
heartbeatClientConfig.Timeout > leaseTimeout { - heartbeatClientConfig.Timeout = leaseTimeout - } + // The timeout is the minimum of the lease duration and status update frequency + leaseTimeout := time.Duration(s.KubeletConfiguration.NodeLeaseDurationSeconds) * time.Second + if heartbeatClientConfig.Timeout > leaseTimeout { + heartbeatClientConfig.Timeout = leaseTimeout } + heartbeatClientConfig.QPS = float32(-1) kubeDeps.HeartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig) if err != nil { @@ -589,14 +594,6 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } } - // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 && - kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { - if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil { - return err - } - } - if kubeDeps.Auth == nil { auth, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration) if err != nil { @@ -644,6 +641,48 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan klog.Info("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") s.CgroupRoot = "/" } + + var reservedSystemCPUs cpuset.CPUSet + var errParse error + if s.ReservedSystemCPUs != "" { + reservedSystemCPUs, errParse = cpuset.Parse(s.ReservedSystemCPUs) + if errParse != nil { + // invalid cpu list is provided, set reservedSystemCPUs to empty, so it won't overwrite kubeReserved/systemReserved + klog.Infof("Invalid ReservedSystemCPUs \"%s\"", s.ReservedSystemCPUs) + return errParse + } + // is it safe to use CAdvisor here ?? 
+ machineInfo, err := kubeDeps.CAdvisorInterface.MachineInfo() + if err != nil { + // if can't use CAdvisor here, fall back to non-explicit cpu list behavior + klog.Warning("Failed to get MachineInfo, set reservedSystemCPUs to empty") + reservedSystemCPUs = cpuset.NewCPUSet() + } else { + reservedList := reservedSystemCPUs.ToSlice() + first := reservedList[0] + last := reservedList[len(reservedList)-1] + if first < 0 || last >= machineInfo.NumCores { + // the specified cpuset is outside of the range of what the machine has + klog.Infof("Invalid cpuset specified by --reserved-cpus") + return fmt.Errorf("Invalid cpuset %q specified by --reserved-cpus", s.ReservedSystemCPUs) + } + } + } else { + reservedSystemCPUs = cpuset.NewCPUSet() + } + + if reservedSystemCPUs.Size() > 0 { + // at cmd option validation phase it is tested that either --system-reserved-cgroup or --kube-reserved-cgroup is specified, so overwrite should be ok + klog.Infof("Option --reserved-cpus is specified, it will overwrite the cpu setting in KubeReserved=\"%v\", SystemReserved=\"%v\".", s.KubeReserved, s.SystemReserved) + if s.KubeReserved != nil { + delete(s.KubeReserved, "cpu") + } + if s.SystemReserved == nil { + s.SystemReserved = make(map[string]string) + } + s.SystemReserved["cpu"] = strconv.Itoa(reservedSystemCPUs.Size()) + klog.Infof("After cpu setting is overwritten, KubeReserved=\"%v\", SystemReserved=\"%v\"", s.KubeReserved, s.SystemReserved) + } kubeReserved, err := parseResourceList(s.KubeReserved) if err != nil { return err @@ -686,6 +725,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan EnforceNodeAllocatable: sets.NewString(s.EnforceNodeAllocatable...), KubeReserved: kubeReserved, SystemReserved: systemReserved, + ReservedSystemCPUs: reservedSystemCPUs, HardEvictionThresholds: hardEvictionThresholds, }, QOSReserved: *experimentalQOSReserved, @@ -721,6 +761,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return err } + // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 && + kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { + if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil { + return err + } + } + if s.HealthzPort > 0 { mux := http.NewServeMux() healthz.InstallHandler(mux) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS index 672e9f3c192..9f9e696f19f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS @@ -15,7 +15,6 @@ reviewers: - eparis - soltysh - madhusudancs -- markturansky - mml - david-mcmahon - jianhuiz diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go index f240f439341..1457e37ed86 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ 
-323,3 +323,14 @@ func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { // Return true if one of the fields have changed. return !isEqual } + +// GetPodPriority returns priority of the given pod. +func GetPodPriority(pod *v1.Pod) int32 { + if pod.Spec.Priority != nil { + return *pod.Spec.Priority + } + // When priority of a running pod is nil, it means it was created at a time + // that there was no global default priority class and the priority class + // name of the pod was empty. So, we resolve to the static default priority. + return 0 +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/BUILD index 9684baccc21..d88640660e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/BUILD @@ -16,6 +16,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/admission/v1", deps = [ "//pkg/apis/admission:go_default_library", + "//pkg/apis/authentication/v1:go_default_library", "//staging/src/k8s.io/api/admission/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/zz_generated.conversion.go index 467a035ed34..82ddddd346e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1/zz_generated.conversion.go @@ -29,6 +29,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" admission "k8s.io/kubernetes/pkg/apis/admission" + authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1" ) func init() { @@ -82,8 +83,7 @@ func autoConvert_v1_AdmissionRequest_To_admission_AdmissionRequest(in *v1.Admiss out.Name = in.Name out.Namespace = in.Namespace out.Operation = admission.Operation(in.Operation) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + if err := authenticationv1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil { return err } if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil { @@ -115,8 +115,7 @@ func autoConvert_admission_AdmissionRequest_To_v1_AdmissionRequest(in *admission out.Name = in.Name out.Namespace = in.Namespace out.Operation = v1.Operation(in.Operation) - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + if err := authenticationv1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil { return err } if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD index b3abea12d41..ac5d48726e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD @@ -16,6 +16,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/admission/v1beta1", deps = [ "//pkg/apis/admission:go_default_library", + "//pkg/apis/authentication/v1:go_default_library", "//staging/src/k8s.io/api/admission/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go index 9eaa43b5818..0a55acfa5b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go @@ -29,6 +29,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" admission "k8s.io/kubernetes/pkg/apis/admission" + authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1" ) func init() { @@ -82,8 +83,7 @@ func autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *v1be out.Name = in.Name out.Namespace = in.Namespace out.Operation = admission.Operation(in.Operation) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + if err := authenticationv1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil { return err } if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil { @@ -115,8 +115,7 @@ func autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admi out.Name = in.Name out.Namespace = in.Namespace out.Operation = v1beta1.Operation(in.Operation) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + if err := authenticationv1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil { return err } if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go index 9b9cb24adde..3fa9a075102 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go @@ -263,7 +263,7 @@ type ValidatingWebhook struct { // +optional ObjectSelector *metav1.LabelSelector - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. 
// Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -387,7 +387,7 @@ type MutatingWebhook struct { // +optional ObjectSelector *metav1.LabelSelector - // SideEffects states whether this webhookk has side effects. + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go index 28a26e6ff61..c42ea2b6215 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -332,8 +332,8 @@ type DeploymentSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. +// DEPRECATED. type DeploymentRollback struct { metav1.TypeMeta // Required: This must match the Name of a deployment. @@ -503,7 +503,6 @@ type DeploymentList struct { // DaemonSetUpdateStrategy defines a strategy to update a daemon set. type DaemonSetUpdateStrategy struct { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. // +optional Type DaemonSetUpdateStrategyType @@ -680,10 +679,10 @@ type DaemonSet struct { } const ( - // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. // DaemonSetTemplateGenerationKey is the key of the labels that is added // to daemon set pods to distinguish between old and new pod templates // during DaemonSet template update. + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. DaemonSetTemplateGenerationKey string = "pod-template-generation" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go index 8aa43f9f490..a2ce0f19eaa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go @@ -21,56 +21,11 @@ import ( "strconv" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/apis/apps" - api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions to handle the *int32 -> int32 - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way. 
- err := scheme.AddConversionFuncs( - Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec, - Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec, - Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, - Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy, - Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, - Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, - Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus, - Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus, - Convert_v1_Deployment_To_apps_Deployment, - Convert_apps_Deployment_To_v1_Deployment, - Convert_apps_DaemonSet_To_v1_DaemonSet, - Convert_v1_DaemonSet_To_apps_DaemonSet, - Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec, - Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec, - Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, - Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, - // extensions - // TODO: below conversions should be dropped in favor of auto-generated - // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1_DeploymentSpec_To_apps_DeploymentSpec, - Convert_apps_DeploymentSpec_To_v1_DeploymentSpec, - Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy, - Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy, - Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, - Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, - Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec, - Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec, - ) - if err != nil { - return err - } - return nil -} - func Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas @@ -114,58 +69,6 @@ func Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, o return nil } -func Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { - out.Type = appsv1.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1.RollingUpdateDeployment) - if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { - out.Type = apps.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(apps.RollingUpdateDeployment) - if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = 
&intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} - func Convert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { @@ -218,23 +121,6 @@ func Convert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *appsv1.D return nil } -func Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - -func Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - func Convert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy annotations because we change them below @@ -267,17 +153,6 @@ func Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out return nil } -func Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = appsv1.DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = &appsv1.RollingUpdateDaemonSet{} - if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } - return nil -} - func Convert_v1_DaemonSet_To_apps_DaemonSet(in *appsv1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { @@ -316,189 +191,6 @@ func Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1.DaemonSetSpec, ou return nil } -func Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = apps.DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = &apps.RollingUpdateDaemonSet{} - if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } - return nil -} - -func Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error 
{ - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if err := Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) - return nil -} - -func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1.StatefulSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = appsv1.PodManagementPolicyType(in.PodManagementPolicy) - if err := Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - return nil -} - -func Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { - out.Type = apps.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(apps.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = *in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1.StatefulSetUpdateStrategy, s 
conversion.Scope) error { - out.Type = appsv1.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = new(int32) - *out.RollingUpdate.Partition = in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.ReadyReplicas = in.ReadyReplicas - out.CurrentReplicas = in.CurrentReplicas - out.UpdatedReplicas = in.UpdatedReplicas - out.CurrentRevision = in.CurrentRevision - out.UpdateRevision = in.UpdateRevision - if in.CollisionCount != nil { - out.CollisionCount = new(int32) - *out.CollisionCount = *in.CollisionCount - } - out.Conditions = make([]apps.StatefulSetCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - return nil -} - -func Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1.StatefulSetStatus, s conversion.Scope) error { - if in.ObservedGeneration != nil { - out.ObservedGeneration = *in.ObservedGeneration - } - out.Replicas = in.Replicas - out.ReadyReplicas = in.ReadyReplicas - out.CurrentReplicas = in.CurrentReplicas - out.UpdatedReplicas = in.UpdatedReplicas - out.CurrentRevision = in.CurrentRevision - out.UpdateRevision = in.UpdateRevision - if in.CollisionCount != nil { - out.CollisionCount = new(int32) - *out.CollisionCount = *in.CollisionCount - } - out.Conditions = make([]appsv1.StatefulSetCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - return nil -} - func deepCopyStringMap(m map[string]string) map[string]string { ret := make(map[string]string, len(m)) for k, v := range m { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go index e5567276c11..038d9b35661 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go index 0c0c636dd85..063d1159119 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go @@ -60,16 +60,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { @@ -90,16 +80,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { @@ -120,16 +100,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { @@ -150,16 +120,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err 
:= s.AddGeneratedConversionFunc((*v1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1.DeploymentSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { @@ -325,11 +285,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) }); err != nil { @@ -340,56 +295,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope) - }); err != nil { - return err - } - if 
err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1.StatefulSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { @@ -400,46 +315,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*v1.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*v1.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*v1.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } return nil } @@ -684,6 +564,11 @@ func autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in * return nil } +// Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s) +} + func autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -698,6 +583,11 @@ func autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in * return nil } +// Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in, out, s) +} + func autoConvert_v1_Deployment_To_apps_Deployment(in *v1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { @@ -877,6 +767,11 @@ func autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1.Deploym return nil } +// Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function. +func Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s) +} + func autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { @@ -891,6 +786,11 @@ func autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.Deplo return nil } +// Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy is an autogenerated conversion function. 
+func Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in, out, s) +} + func autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in *v1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { @@ -1005,6 +905,11 @@ func autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1.ReplicaSetSpec, return nil } +// Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec is an autogenerated conversion function. +func Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in, out, s) +} + func autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -1017,6 +922,11 @@ func autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpe return nil } +// Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec is an autogenerated conversion function. +func Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in, out, s) +} + func autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas @@ -1048,27 +958,59 @@ func Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetSta } func autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } +// Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet is an autogenerated conversion function. +func Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { + return autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in, out, s) +} + func autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } +// Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet is an autogenerated conversion function. 
+func Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in, out, s) +} + func autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function. +func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s) +} + func autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment is an autogenerated conversion function. +func Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in, out, s) +} + func autoConvert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { return err @@ -1213,6 +1155,11 @@ func autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1.StatefulSetSp return nil } +// Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function. 
+func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s) +} + func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1.StatefulSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -1231,6 +1178,11 @@ func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSet return nil } +// Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec is an autogenerated conversion function. +func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in, out, s) +} + func autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil { return err @@ -1246,6 +1198,11 @@ func autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1.StatefulS return nil } +// Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus is an autogenerated conversion function. +func Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s) +} + func autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *v1.StatefulSetStatus, s conversion.Scope) error { if err := metav1.Convert_Pointer_int64_To_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil { return err @@ -1261,6 +1218,11 @@ func autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.Statefu return nil } +// Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus is an autogenerated conversion function. +func Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *v1.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in, out, s) +} + func autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { out.Type = apps.StatefulSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -1275,6 +1237,11 @@ func autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy( return nil } +// Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function. 
+func Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s) +} + func autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1.StatefulSetUpdateStrategy, s conversion.Scope) error { out.Type = v1.StatefulSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -1288,3 +1255,8 @@ func autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy( } return nil } + +// Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy is an autogenerated conversion function. +func Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1.StatefulSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go index 15804ad9bbf..ba3fbe40e17 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go @@ -20,46 +20,16 @@ import ( "fmt" appsv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" - api "k8s.io/kubernetes/pkg/apis/core" - k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions to handle the *int32 -> int32 - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way. - err := scheme.AddConversionFuncs( - Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, - Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, - Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, - Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy, - // extensions - // TODO: below conversions should be dropped in favor of auto-generated - // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, - Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, - Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, - Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - ) - if err != nil { - return err - } - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. 
- err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"), + if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"), func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": @@ -67,118 +37,13 @@ func addConversionFuncs(scheme *runtime.Scheme) error { default: return "", "", fmt.Errorf("field label not supported for appsv1beta1.StatefulSet: %s", label) } - }) - if err != nil { + }); err != nil { return err } return nil } -func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if err := Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) - return nil -} - -func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta1.StatefulSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = appsv1beta1.PodManagementPolicyType(in.PodManagementPolicy) - if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { - out.Type = apps.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = 
new(apps.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = *in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta1.StatefulSetUpdateStrategy, s conversion.Scope) error { - out.Type = appsv1beta1.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1beta1.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = new(int32) - *out.RollingUpdate.Partition = in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = int32(in.Replicas) out.TargetSelector = in.Selector @@ -211,110 +76,3 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.Scal } return nil } - -func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.RevisionHistoryLimit = in.RevisionHistoryLimit - out.MinReadySeconds = in.MinReadySeconds - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(apps.RollbackConfig) - out.RollbackTo.Revision = in.RollbackTo.Revision - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.Selector = in.Selector - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int32(in.MinReadySeconds) - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(appsv1beta1.RollbackConfig) - out.RollbackTo.Revision = int64(in.RollbackTo.Revision) - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error { - out.Type = appsv1beta1.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1beta1.RollingUpdateDeployment) - if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func 
Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { - out.Type = apps.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(apps.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 728db5da923..5b4d63cbd40 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -181,16 +181,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.StatefulSet)(nil), (*apps.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_StatefulSet_To_apps_StatefulSet(a.(*v1beta1.StatefulSet), b.(*apps.StatefulSet), scope) }); err != nil { @@ -251,66 +241,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), 
b.(*v1beta1.DeploymentStrategy), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta1.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta1.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*v1beta1.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } return nil } @@ -535,6 +475,11 @@ func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.Deplo return nil 
} +// Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec is an autogenerated conversion function. +func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in, out, s) +} + func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -554,6 +499,11 @@ func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.Deployme return nil } +// Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec is an autogenerated conversion function. +func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { + return autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in, out, s) +} + func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas @@ -602,6 +552,11 @@ func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1bet return nil } +// Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function. +func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s) +} + func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { @@ -616,6 +571,11 @@ func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps. return nil } +// Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy is an autogenerated conversion function. 
+func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in, out, s) +} + func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil @@ -637,17 +597,35 @@ func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConf } func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function. +func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s) +} + func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment is an autogenerated conversion function. 
+func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in, out, s) +} + func autoConvert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1beta1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { return err @@ -857,6 +835,11 @@ func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.Sta return nil } +// Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function. +func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s) +} + func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta1.StatefulSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -875,6 +858,11 @@ func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.Statef return nil } +// Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec is an autogenerated conversion function. +func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta1.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in, out, s) +} + func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) out.Replicas = in.Replicas @@ -925,6 +913,11 @@ func autoConvert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStra return nil } +// Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function. +func Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1beta1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s) +} + func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1beta1.StatefulSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta1.StatefulSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -938,3 +931,8 @@ func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStra } return nil } + +// Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy is an autogenerated conversion function. 
+func Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1beta1.StatefulSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go index 327b61292e0..adf0e1dee05 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go @@ -21,60 +21,18 @@ import ( "strconv" appsv1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/apis/apps" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions to handle the *int32 -> int32 - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way. - err := scheme.AddConversionFuncs( - Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec, - Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec, - Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, - Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy, - Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet, - Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, - Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus, - Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus, - Convert_v1beta2_Deployment_To_apps_Deployment, - Convert_apps_Deployment_To_v1beta2_Deployment, - Convert_apps_DaemonSet_To_v1beta2_DaemonSet, - Convert_v1beta2_DaemonSet_To_apps_DaemonSet, - Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec, - Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec, - Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy, - Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, - // extensions - // TODO: below conversions should be dropped in favor of auto-generated - // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus, - Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec, - Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec, - Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy, - Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy, - Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, - Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, - Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec, - Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec, - ) - if err != nil { - return err - } - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. 
- err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"), + if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"), func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": @@ -82,177 +40,10 @@ func addConversionFuncs(scheme *runtime.Scheme) error { default: return "", "", fmt.Errorf("field label not supported for appsv1beta2.StatefulSet: %s", label) } - }) - if err != nil { - return err - } - - return nil -} - -func Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if err := Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) - return nil -} - -func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta2.StatefulSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit - } 
else { - out.RevisionHistoryLimit = nil - } - out.ServiceName = in.ServiceName - out.PodManagementPolicy = appsv1beta2.PodManagementPolicyType(in.PodManagementPolicy) - if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + }); err != nil { return err } - return nil -} -func Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta2.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { - out.Type = apps.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(apps.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = *in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta2.StatefulSetUpdateStrategy, s conversion.Scope) error { - out.Type = appsv1beta2.StatefulSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1beta2.RollingUpdateStatefulSetStrategy) - out.RollingUpdate.Partition = new(int32) - *out.RollingUpdate.Partition = in.RollingUpdate.Partition - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.ReadyReplicas = in.ReadyReplicas - out.CurrentReplicas = in.CurrentReplicas - out.UpdatedReplicas = in.UpdatedReplicas - out.CurrentRevision = in.CurrentRevision - out.UpdateRevision = in.UpdateRevision - if in.CollisionCount != nil { - out.CollisionCount = new(int32) - *out.CollisionCount = *in.CollisionCount - } - out.Conditions = make([]apps.StatefulSetCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - return nil -} - -func Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1beta2.StatefulSetStatus, s conversion.Scope) error { - if in.ObservedGeneration != nil { - out.ObservedGeneration = *in.ObservedGeneration - } - out.Replicas = in.Replicas - out.ReadyReplicas = in.ReadyReplicas - out.CurrentReplicas = in.CurrentReplicas - out.UpdatedReplicas = in.UpdatedReplicas - out.CurrentRevision = in.CurrentRevision - out.UpdateRevision = in.UpdateRevision - if in.CollisionCount != nil { - out.CollisionCount = new(int32) - *out.CollisionCount = *in.CollisionCount - } - out.Conditions = make([]appsv1beta2.StatefulSetCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } return nil } @@ -336,69 +127,6 @@ func Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSp return nil } -func Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error { - out.Type = appsv1beta2.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(appsv1beta2.RollingUpdateDeployment) - if err := 
Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { - out.Type = apps.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(apps.RollingUpdateDeployment) - if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { @@ -426,18 +154,6 @@ func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, o return nil } -func Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - func Convert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify annotations below @@ -494,17 +210,6 @@ func Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, return nil } -func Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = appsv1beta2.DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = 
&appsv1beta2.RollingUpdateDaemonSet{} - if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } - return nil -} - func Convert_v1beta2_DaemonSet_To_apps_DaemonSet(in *appsv1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { @@ -543,17 +248,6 @@ func Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1beta2.DaemonS return nil } -func Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = apps.DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = &apps.RollingUpdateDaemonSet{} - if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } - return nil -} - func deepCopyStringMap(m map[string]string) map[string]string { ret := make(map[string]string, len(m)) for k, v := range m { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go index ae5d8692fa8..fa4bc08c51e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -61,16 +61,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1beta2.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { @@ -91,16 +81,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return 
Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1beta2.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { @@ -121,16 +101,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), b.(*apps.Deployment), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta2.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { @@ -151,16 +121,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta2.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { @@ -281,16 +241,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*autoscaling.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.StatefulSet)(nil), (*apps.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_StatefulSet_To_apps_StatefulSet(a.(*v1beta2.StatefulSet), b.(*apps.StatefulSet), scope) }); err != nil { @@ -356,11 +306,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) }); err != nil { @@ -371,46 +316,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1beta2.StatefulSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta2.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) }); err != nil { @@ -421,11 +331,6 @@ func RegisterConversions(s 
*runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { @@ -436,51 +341,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta2.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*autoscaling.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta2.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*v1beta2.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*v1beta2.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope) - }); err != nil { - return err - } return nil } @@ -725,6 +595,11 @@ func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy return nil } +// Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s) +} + func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta2.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -739,6 +614,11 @@ func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy return nil } +// Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in, out, s) +} + func autoConvert_v1beta2_Deployment_To_apps_Deployment(in *v1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { @@ -918,6 +798,11 @@ func autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1bet return nil } +// Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function. +func Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s) +} + func autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta2.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { @@ -932,6 +817,11 @@ func autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps. return nil } +// Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy is an autogenerated conversion function. +func Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { + return autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in, out, s) +} + func autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *v1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { @@ -1046,6 +936,11 @@ func autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta2.Repli return nil } +// Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec is an autogenerated conversion function. 
+func Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in, out, s) +} + func autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -1058,6 +953,11 @@ func autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaS return nil } +// Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec is an autogenerated conversion function. +func Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in, out, s) +} + func autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas @@ -1089,27 +989,59 @@ func Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaS } func autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } +// Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet is an autogenerated conversion function. +func Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { + return autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in, out, s) +} + func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } +// Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet is an autogenerated conversion function. 
+func Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in, out, s) +} + func autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function. +func Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s) +} + func autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } +// Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment is an autogenerated conversion function. +func Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in, out, s) +} + func autoConvert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1beta2.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { return err @@ -1319,6 +1251,11 @@ func autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.Sta return nil } +// Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function. 
+func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s) +} + func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta2.StatefulSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err @@ -1337,6 +1274,11 @@ func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.Statef return nil } +// Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec is an autogenerated conversion function. +func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta2.StatefulSetSpec, s conversion.Scope) error { + return autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in, out, s) +} + func autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil { return err @@ -1352,6 +1294,11 @@ func autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta2 return nil } +// Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus is an autogenerated conversion function. +func Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s) +} + func autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.StatefulSetStatus, out *v1beta2.StatefulSetStatus, s conversion.Scope) error { if err := metav1.Convert_Pointer_int64_To_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil { return err @@ -1367,6 +1314,11 @@ func autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.St return nil } +// Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus is an autogenerated conversion function. +func Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.StatefulSetStatus, out *v1beta2.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in, out, s) +} + func autoConvert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1beta2.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { out.Type = apps.StatefulSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { @@ -1381,6 +1333,11 @@ func autoConvert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStra return nil } +// Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function. 
+func Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1beta2.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
+    return autoConvert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s)
+}
+
 func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1beta2.StatefulSetUpdateStrategy, s conversion.Scope) error {
     out.Type = v1beta2.StatefulSetUpdateStrategyType(in.Type)
     if in.RollingUpdate != nil {
@@ -1394,3 +1351,8 @@ func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStra
     }
     return nil
 }
+
+// Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy is an autogenerated conversion function.
+func Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1beta2.StatefulSetUpdateStrategy, s conversion.Scope) error {
+    return autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in, out, s)
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go
index 2ff5732d6db..8088edeb418 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/conversion.go
@@ -17,10 +17,17 @@ limitations under the License.
 package v1
 
 import (
-    "k8s.io/apimachinery/pkg/runtime"
+    v1 "k8s.io/api/authentication/v1"
+    conversion "k8s.io/apimachinery/pkg/conversion"
+    authentication "k8s.io/kubernetes/pkg/apis/authentication"
 )
 
-func addConversionFuncs(scheme *runtime.Scheme) error {
-    // Add non-generated conversion functions
-    return scheme.AddConversionFuncs()
+// Convert_v1_UserInfo_To_authentication_UserInfo is defined outside the autogenerated file for use by other API packages
+func Convert_v1_UserInfo_To_authentication_UserInfo(in *v1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
+    return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s)
+}
+
+// Convert_authentication_UserInfo_To_v1_UserInfo is defined outside the autogenerated file for use by other API packages
+func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *v1.UserInfo, s conversion.Scope) error {
+    return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go
index 6dbb52de445..b2f22b9a356 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go
@@ -41,5 +41,5 @@ func init() {
     // We only register manually written functions here. The registration of the
     // generated functions takes place in the generated files. The separation
     // makes the code compile even when the generated files are missing.
-    localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
+    localSchemeBuilder.Register(addDefaultingFuncs)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go
index 8e95da3274b..9bab47ac21d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go
@@ -108,13 +108,13 @@ func RegisterConversions(s *runtime.Scheme) error {
     }); err != nil {
         return err
     }
-    if err := s.AddGeneratedConversionFunc((*v1.UserInfo)(nil), (*authentication.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
-        return Convert_v1_UserInfo_To_authentication_UserInfo(a.(*v1.UserInfo), b.(*authentication.UserInfo), scope)
+    if err := s.AddConversionFunc((*authentication.UserInfo)(nil), (*v1.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+        return Convert_authentication_UserInfo_To_v1_UserInfo(a.(*authentication.UserInfo), b.(*v1.UserInfo), scope)
     }); err != nil {
         return err
     }
-    if err := s.AddGeneratedConversionFunc((*authentication.UserInfo)(nil), (*v1.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
-        return Convert_authentication_UserInfo_To_v1_UserInfo(a.(*authentication.UserInfo), b.(*v1.UserInfo), scope)
+    if err := s.AddConversionFunc((*v1.UserInfo)(nil), (*authentication.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+        return Convert_v1_UserInfo_To_authentication_UserInfo(a.(*v1.UserInfo), b.(*authentication.UserInfo), scope)
     }); err != nil {
         return err
     }
@@ -321,11 +321,6 @@ func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *v1.UserInfo, out *au
     return nil
 }
 
-// Convert_v1_UserInfo_To_authentication_UserInfo is an autogenerated conversion function.
-func Convert_v1_UserInfo_To_authentication_UserInfo(in *v1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
-    return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s)
-}
-
 func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *v1.UserInfo, s conversion.Scope) error {
     out.Username = in.Username
     out.UID = in.UID
@@ -333,8 +328,3 @@ func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserI
     out.Extra = *(*map[string]v1.ExtraValue)(unsafe.Pointer(&in.Extra))
     return nil
 }
-
-// Convert_authentication_UserInfo_To_v1_UserInfo is an autogenerated conversion function.
-func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *v1.UserInfo, s conversion.Scope) error {
-    return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s)
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD
index 1b045623f99..96b0fb0f664 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD
@@ -8,7 +8,6 @@ load(
 go_library(
     name = "go_default_library",
     srcs = [
-        "conversion.go",
         "defaults.go",
         "doc.go",
         "register.go",
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go
index 715a367ea4c..b0fe3e57ed3 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go
@@ -42,5 +42,5 @@ func init() {
     // We only register manually written functions here. The registration of the
     // generated functions takes place in the generated files. The separation
     // makes the code compile even when the generated files are missing.
-    localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
+    localSchemeBuilder.Register(addDefaultingFuncs)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD
index e5c392a18cd..a0bec4599c0 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD
@@ -8,7 +8,6 @@ load(
 go_library(
     name = "go_default_library",
     srcs = [
-        "conversion.go",
         "defaults.go",
         "doc.go",
         "register.go",
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go
index fa7e00aaf9a..2f6865b4286 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go
@@ -33,13 +33,16 @@ func Resource(resource string) schema.GroupResource {
 }
 
 var (
+    // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
+    // defaulting and conversion init funcs are registered as well.
     localSchemeBuilder = &authorizationv1.SchemeBuilder
-    AddToScheme        = localSchemeBuilder.AddToScheme
+    // AddToScheme is a global function that registers this API group & version to a scheme
+    AddToScheme = localSchemeBuilder.AddToScheme
 )
 
 func init() {
     // We only register manually written functions here. The registration of the
     // generated functions takes place in the generated files. The separation
     // makes the code compile even when the generated files are missing.
-    localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
+    localSchemeBuilder.Register(addDefaultingFuncs)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD
index 36cb3831310..210a92b0c9b 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD
@@ -8,7 +8,6 @@ load(
 go_library(
     name = "go_default_library",
     srcs = [
-        "conversion.go",
         "defaults.go",
         "doc.go",
         "register.go",
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go
index 399f20873f1..5e0b1d0d717 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go
@@ -42,5 +42,5 @@ func init() {
     // We only register manually written functions here. The registration of the
     // generated functions takes place in the generated files. The separation
     // makes the code compile even when the generated files are missing.
-    localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
+    localSchemeBuilder.Register(addDefaultingFuncs)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
index bd461a02fe6..43f42d64ec9 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
@@ -100,7 +100,7 @@ type HorizontalPodAutoscalerSpec struct {
 // MetricSourceType indicates the type of metric.
 type MetricSourceType string
 
-var (
+const (
     // ObjectMetricSourceType is a metric describing a kubernetes object
     // (for example, hits-per-second on an Ingress object).
     ObjectMetricSourceType MetricSourceType = "Object"
@@ -228,7 +228,7 @@ type MetricTarget struct {
 // "Value", "AverageValue", or "Utilization"
 type MetricTargetType string
 
-var (
+const (
     // UtilizationMetricType is a possible value for MetricTarget.Type.
     UtilizationMetricType MetricTargetType = "Utilization"
     // ValueMetricType is a possible value for MetricTarget.Type.
@@ -282,7 +282,7 @@ const (
 // a HorizontalPodAutoscaler.
 type HorizontalPodAutoscalerConditionType string
 
-var (
+const (
     // ScalingActive indicates that the HPA controller is able to scale if necessary:
     // it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go index a62a3339475..8aca1c12522 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go @@ -23,44 +23,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/core" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - Convert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource, - Convert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource, - Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource, - Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, - Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource, - Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource, - Convert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus, - Convert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus, - Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus, - Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, - Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus, - Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, - Convert_autoscaling_MetricTarget_To_v1_CrossVersionObjectReference, - Convert_v1_CrossVersionObjectReference_To_autoscaling_MetricTarget, - Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus, - Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, - ) - if err != nil { - return err - } - - return nil -} - func Convert_autoscaling_MetricTarget_To_v1_CrossVersionObjectReference(in *autoscaling.MetricTarget, out *autoscalingv1.CrossVersionObjectReference, s conversion.Scope) error { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go index 957d0e99c6a..be246346d7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go index 93dfa7b7dac..d18511be4f1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -49,36 +49,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*v1.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*v1.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*v1.ExternalMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*v1.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*v1.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*v1.ExternalMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*v1.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*v1.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*v1.HorizontalPodAutoscaler), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*v1.HorizontalPodAutoscalerCondition), b.(*autoscaling.HorizontalPodAutoscalerCondition), scope) }); err != nil { @@ -99,26 +69,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*v1.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*v1.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*v1.HorizontalPodAutoscalerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*v1.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*v1.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*v1.HorizontalPodAutoscalerStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_MetricSpec_To_autoscaling_MetricSpec(a.(*v1.MetricSpec), b.(*autoscaling.MetricSpec), scope) }); err != nil { @@ -139,66 +89,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*v1.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*v1.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*v1.ObjectMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*v1.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*v1.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*v1.ObjectMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*v1.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope) - }); err != nil { - return 
err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricSource)(nil), (*v1.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*v1.PodsMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*v1.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*v1.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*v1.PodsMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*v1.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*v1.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*v1.ResourceMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*v1.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*v1.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*v1.ResourceMetricStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.Scale)(nil), (*autoscaling.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Scale_To_autoscaling_Scale(a.(*v1.Scale), b.(*autoscaling.Scale), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go index 4446e867915..dc03dfe4281 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go @@ -21,42 +21,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource, - 
Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource, - Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource, - Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, - Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource, - Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource, - Convert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus, - Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus, - Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus, - Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, - Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus, - Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, - Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource, - Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, - Convert_autoscaling_MetricTarget_To_v2beta1_CrossVersionObjectReference, - Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_MetricTarget, - Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus, - Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, - Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler, - Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - ) - if err != nil { - return err - } - - return nil -} - func Convert_autoscaling_MetricTarget_To_v2beta1_CrossVersionObjectReference(in *autoscaling.MetricTarget, out *autoscalingv2beta1.CrossVersionObjectReference, s conversion.Scope) error { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/register.go index da80b2bef79..b2433d34cf8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go index 8c98b810e5d..27fdd2b6403 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go @@ -49,26 +49,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v2beta1.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*v2beta1.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*v2beta1.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*v2beta1.ExternalMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*v2beta1.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*v2beta1.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*v2beta1.ExternalMetricStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v2beta1.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*v2beta1.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope) }); err != nil { @@ -139,66 +119,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v2beta1.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*v2beta1.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*v2beta1.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*v2beta1.ObjectMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*v2beta1.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*v2beta1.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*v2beta1.ObjectMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*v2beta1.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricSource)(nil), (*v2beta1.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*v2beta1.PodsMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*v2beta1.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*v2beta1.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*v2beta1.PodsMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*v2beta1.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*v2beta1.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*v2beta1.ResourceMetricSource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v2beta1.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*v2beta1.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*v2beta1.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*v2beta1.ResourceMetricStatus), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*autoscaling.ExternalMetricSource)(nil), 
(*v2beta1.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*v2beta1.ExternalMetricSource), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/BUILD index 04a183d724f..6c7d5eb4bbc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/BUILD @@ -13,8 +13,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/doc.go index a6be8b596e7..7228e86c609 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v2beta2 // +k8s:defaulter-gen=TypeMeta package v2beta2 // import "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/zz_generated.conversion.go index 30d43d3b092..1f2ebe74211 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/zz_generated.conversion.go @@ -21,7 +21,16 @@ limitations under the License. package v2beta2 import ( + unsafe "unsafe" + + v2beta2 "k8s.io/api/autoscaling/v2beta2" + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" ) func init() { @@ -31,5 +40,733 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v2beta2.CrossVersionObjectReference)(nil), (*autoscaling.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(a.(*v2beta2.CrossVersionObjectReference), b.(*autoscaling.CrossVersionObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.CrossVersionObjectReference)(nil), (*v2beta2.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(a.(*autoscaling.CrossVersionObjectReference), b.(*v2beta2.CrossVersionObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*v2beta2.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*v2beta2.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*v2beta2.ExternalMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*v2beta2.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*v2beta2.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*v2beta2.ExternalMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*v2beta2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*v2beta2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*v2beta2.HorizontalPodAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*v2beta2.HorizontalPodAutoscalerCondition), 
b.(*autoscaling.HorizontalPodAutoscalerCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerCondition)(nil), (*v2beta2.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(a.(*autoscaling.HorizontalPodAutoscalerCondition), b.(*v2beta2.HorizontalPodAutoscalerCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscalerList)(nil), (*autoscaling.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(a.(*v2beta2.HorizontalPodAutoscalerList), b.(*autoscaling.HorizontalPodAutoscalerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerList)(nil), (*v2beta2.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(a.(*autoscaling.HorizontalPodAutoscalerList), b.(*v2beta2.HorizontalPodAutoscalerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*v2beta2.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*v2beta2.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*v2beta2.HorizontalPodAutoscalerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*v2beta2.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*v2beta2.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*v2beta2.HorizontalPodAutoscalerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.MetricIdentifier)(nil), (*autoscaling.MetricIdentifier)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(a.(*v2beta2.MetricIdentifier), b.(*autoscaling.MetricIdentifier), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.MetricIdentifier)(nil), (*v2beta2.MetricIdentifier)(nil), func(a, b interface{}, 
scope conversion.Scope) error { + return Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(a.(*autoscaling.MetricIdentifier), b.(*v2beta2.MetricIdentifier), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(a.(*v2beta2.MetricSpec), b.(*autoscaling.MetricSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.MetricSpec)(nil), (*v2beta2.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(a.(*autoscaling.MetricSpec), b.(*v2beta2.MetricSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.MetricStatus)(nil), (*autoscaling.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(a.(*v2beta2.MetricStatus), b.(*autoscaling.MetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.MetricStatus)(nil), (*v2beta2.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(a.(*autoscaling.MetricStatus), b.(*v2beta2.MetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.MetricTarget)(nil), (*autoscaling.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(a.(*v2beta2.MetricTarget), b.(*autoscaling.MetricTarget), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.MetricTarget)(nil), (*v2beta2.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(a.(*autoscaling.MetricTarget), b.(*v2beta2.MetricTarget), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.MetricValueStatus)(nil), (*autoscaling.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(a.(*v2beta2.MetricValueStatus), b.(*autoscaling.MetricValueStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.MetricValueStatus)(nil), (*v2beta2.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(a.(*autoscaling.MetricValueStatus), b.(*v2beta2.MetricValueStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*v2beta2.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*v2beta2.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*v2beta2.ObjectMetricSource), 
scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*v2beta2.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*v2beta2.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*v2beta2.ObjectMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*v2beta2.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricSource)(nil), (*v2beta2.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*v2beta2.PodsMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*v2beta2.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*v2beta2.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*v2beta2.PodsMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*v2beta2.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*v2beta2.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*v2beta2.ResourceMetricSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v2beta2.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*v2beta2.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*v2beta2.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*v2beta2.ResourceMetricStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *v2beta2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +// Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference is an autogenerated conversion function. +func Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *v2beta2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *v2beta2.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +// Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference is an autogenerated conversion function. +func Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *v2beta2.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *v2beta2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error { + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource is an autogenerated conversion function. +func Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *v2beta2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error { + return autoConvert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *v2beta2.ExternalMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource is an autogenerated conversion function. 
+func Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *v2beta2.ExternalMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in, out, s) +} + +func autoConvert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *v2beta2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error { + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus is an autogenerated conversion function. +func Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *v2beta2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error { + return autoConvert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *v2beta2.ExternalMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus is an autogenerated conversion function. +func Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *v2beta2.ExternalMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in, out, s) +} + +func autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *v2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler is an autogenerated conversion function. 
+func Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *v2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *v2beta2.HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler is an autogenerated conversion function. +func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *v2beta2.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *v2beta2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error { + out.Type = autoscaling.HorizontalPodAutoscalerConditionType(in.Type) + out.Status = autoscaling.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition is an autogenerated conversion function. +func Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *v2beta2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error { + return autoConvert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *v2beta2.HorizontalPodAutoscalerCondition, s conversion.Scope) error { + out.Type = v2beta2.HorizontalPodAutoscalerConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition is an autogenerated conversion function. 
+func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *v2beta2.HorizontalPodAutoscalerCondition, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in, out, s) +} + +func autoConvert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *v2beta2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList is an autogenerated conversion function. +func Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *v2beta2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *v2beta2.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v2beta2.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList is an autogenerated conversion function. +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *v2beta2.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *v2beta2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]autoscaling.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec is an autogenerated conversion function. 
+func Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *v2beta2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *v2beta2.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]v2beta2.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec is an autogenerated conversion function. +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *v2beta2.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *v2beta2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + out.Conditions = *(*[]autoscaling.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus is an autogenerated conversion function. +func Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *v2beta2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *v2beta2.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]v2beta2.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + out.Conditions = *(*[]v2beta2.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus is an autogenerated conversion function. 
+func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *v2beta2.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *v2beta2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error { + out.Name = in.Name + out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) return nil } + +// Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier is an autogenerated conversion function. +func Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *v2beta2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error { + return autoConvert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in, out, s) +} + +func autoConvert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *v2beta2.MetricIdentifier, s conversion.Scope) error { + out.Name = in.Name + out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) + return nil +} + +// Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier is an autogenerated conversion function. +func Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *v2beta2.MetricIdentifier, s conversion.Scope) error { + return autoConvert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in, out, s) +} + +func autoConvert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in *v2beta2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + out.External = (*autoscaling.ExternalMetricSource)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec is an autogenerated conversion function. +func Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in *v2beta2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in *autoscaling.MetricSpec, out *v2beta2.MetricSpec, s conversion.Scope) error { + out.Type = v2beta2.MetricSourceType(in.Type) + out.Object = (*v2beta2.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*v2beta2.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*v2beta2.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + out.External = (*v2beta2.ExternalMetricSource)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec is an autogenerated conversion function. 
+func Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in *autoscaling.MetricSpec, out *v2beta2.MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in, out, s) +} + +func autoConvert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in *v2beta2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + out.External = (*autoscaling.ExternalMetricStatus)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus is an autogenerated conversion function. +func Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in *v2beta2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in *autoscaling.MetricStatus, out *v2beta2.MetricStatus, s conversion.Scope) error { + out.Type = v2beta2.MetricSourceType(in.Type) + out.Object = (*v2beta2.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*v2beta2.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*v2beta2.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + out.External = (*v2beta2.ExternalMetricStatus)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus is an autogenerated conversion function. +func Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in *autoscaling.MetricStatus, out *v2beta2.MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in, out, s) +} + +func autoConvert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in *v2beta2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error { + out.Type = autoscaling.MetricTargetType(in.Type) + out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value)) + out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue)) + out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization)) + return nil +} + +// Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget is an autogenerated conversion function. +func Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in *v2beta2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error { + return autoConvert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in, out, s) +} + +func autoConvert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in *autoscaling.MetricTarget, out *v2beta2.MetricTarget, s conversion.Scope) error { + out.Type = v2beta2.MetricTargetType(in.Type) + out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value)) + out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue)) + out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization)) + return nil +} + +// Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget is an autogenerated conversion function. 
+func Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in *autoscaling.MetricTarget, out *v2beta2.MetricTarget, s conversion.Scope) error { + return autoConvert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in, out, s) +} + +func autoConvert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *v2beta2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error { + out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value)) + out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue)) + out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization)) + return nil +} + +// Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus is an autogenerated conversion function. +func Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *v2beta2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error { + return autoConvert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *v2beta2.MetricValueStatus, s conversion.Scope) error { + out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value)) + out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue)) + out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization)) + return nil +} + +// Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus is an autogenerated conversion function. +func Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *v2beta2.MetricValueStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in, out, s) +} + +func autoConvert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *v2beta2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource is an autogenerated conversion function. 
+func Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *v2beta2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *v2beta2.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource is an autogenerated conversion function. +func Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *v2beta2.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in, out, s) +} + +func autoConvert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *v2beta2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus is an autogenerated conversion function. +func Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *v2beta2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *v2beta2.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus is an autogenerated conversion function. 
+func Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *v2beta2.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *v2beta2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource is an autogenerated conversion function. +func Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *v2beta2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *v2beta2.PodsMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource is an autogenerated conversion function. +func Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *v2beta2.PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in, out, s) +} + +func autoConvert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *v2beta2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus is an autogenerated conversion function. +func Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *v2beta2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *v2beta2.PodsMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil { + return err + } + if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus is an autogenerated conversion function. 
+func Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *v2beta2.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in, out, s) +} + +func autoConvert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *v2beta2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = core.ResourceName(in.Name) + if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource is an autogenerated conversion function. +func Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *v2beta2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *v2beta2.ResourceMetricSource, s conversion.Scope) error { + out.Name = v1.ResourceName(in.Name) + if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource is an autogenerated conversion function. +func Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *v2beta2.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in, out, s) +} + +func autoConvert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *v2beta2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = core.ResourceName(in.Name) + if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus is an autogenerated conversion function. +func Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *v2beta2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *v2beta2.ResourceMetricStatus, s conversion.Scope) error { + out.Name = v1.ResourceName(in.Name) + if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil { + return err + } + return nil +} + +// Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus is an autogenerated conversion function. 
+func Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *v2beta2.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go index 7c715542367..873876cbfc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go @@ -28,15 +28,6 @@ import ( ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v1_JobSpec, - Convert_v1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - return err - } - return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Job"), func(label, value string) (string, string, error) { switch label { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go index 066f30ee30b..5b66fd3d635 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go @@ -70,16 +70,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.JobSpec)(nil), (*batch.JobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JobSpec_To_batch_JobSpec(a.(*v1.JobSpec), b.(*batch.JobSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*batch.JobSpec)(nil), (*v1.JobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_batch_JobSpec_To_v1_JobSpec(a.(*batch.JobSpec), b.(*v1.JobSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.JobStatus)(nil), (*batch.JobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_JobStatus_To_batch_JobStatus(a.(*v1.JobStatus), b.(*batch.JobStatus), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go index d4ba7fdc925..f6f4dceeef1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go @@ -118,27 +118,27 @@ type CertificateSigningRequestList struct { type KeyUsage string const ( - UsageSigning KeyUsage = "signing" - UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommittment KeyUsage = "content commitment" - UsageKeyEncipherment KeyUsage = "key encipherment" - UsageKeyAgreement KeyUsage = "key agreement" - UsageDataEncipherment KeyUsage = "data encipherment" - UsageCertSign KeyUsage = "cert sign" - UsageCRLSign KeyUsage = "crl sign" - UsageEncipherOnly KeyUsage = "encipher only" - UsageDecipherOnly KeyUsage = "decipher only" - UsageAny KeyUsage = "any" - UsageServerAuth KeyUsage = "server auth" - UsageClientAuth KeyUsage = "client auth" - UsageCodeSigning KeyUsage = "code signing" - UsageEmailProtection KeyUsage = "email protection" - UsageSMIME KeyUsage = 
"s/mime" - UsageIPsecEndSystem KeyUsage = "ipsec end system" - UsageIPsecTunnel KeyUsage = "ipsec tunnel" - UsageIPsecUser KeyUsage = "ipsec user" - UsageTimestamping KeyUsage = "timestamping" - UsageOCSPSigning KeyUsage = "ocsp signing" - UsageMicrosoftSGC KeyUsage = "microsoft sgc" - UsageNetscapSGC KeyUsage = "netscape sgc" + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommitment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapeSGC KeyUsage = "netscape sgc" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS index be77002e9b2..270e4c49962 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS @@ -41,6 +41,5 @@ reviewers: - soltysh - piosz - jsafrane -- jbeda labels: - sig/apps diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index 688287611e8..70b9ff7583b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -26,7 +26,7 @@ const ( // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" // TolerationsAnnotationKey represents the key of tolerations data (json serialized) @@ -56,7 +56,7 @@ const ( // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" @@ -65,7 +65,7 @@ const ( // the kubelet prior to running BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" - // annotation key prefix used to identify non-convertible json paths. 
+ // NonConvertibleAnnotationPrefix annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" kubectlPrefix = "kubectl.kubernetes.io/" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go index 6017bfdab1b..6475fdab1de 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -16,7 +16,7 @@ limitations under the License. // +k8s:deepcopy-gen=package -// Package api contains the latest (or "internal") version of the +// Package core contains the latest (or "internal") version of the // Kubernetes API objects. This is the API objects as represented in memory. // The contract presented to clients is located in the versioned packages, // which are sub-directories. The first one is "v1". Those packages diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index 835e2cbf19b..e93803dd1d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -281,34 +281,6 @@ func IsStandardFinalizerName(str string) bool { return standardFinalizers.Has(str) } -// LoadBalancerStatusEqual checks if the status of the load balancer is equal to the target status -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - // GetAccessModesAsString returns a string representation of an array of access modes. // modes, when present, are always in the same order: RWO,ROX,RWX. 
func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS index c06f07bdcc9..eb1df912533 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS @@ -9,4 +9,3 @@ reviewers: - nikhiljindal - dims - david-mcmahon -- feihujiang diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/json.go index 937cd056c01..46702cb4681 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/json.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -24,5 +24,8 @@ import "encoding/json" var _ = json.Marshaler(&AvoidPods{}) var _ = json.Unmarshaler(&AvoidPods{}) +// MarshalJSON panics to prevent marshalling of internal structs func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } + +// UnmarshalJSON panics to prevent unmarshalling of internal structs +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go index 55b27f30b6f..60f7e8a88f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -23,12 +23,15 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) +// SetGroupVersionKind sets the API version and kind of the object reference func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } +// GroupVersionKind returns the API version and kind of the object reference func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } +// GetObjectKind returns the kind of object reference func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go index 896670d615e..1dfdb767d51 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go @@ -88,7 +88,8 @@ func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, "spec.schedulerName", "status.phase", "status.hostIP", - "status.podIP": + "status.podIP", + "status.podIPs": return label, value, nil // This is for backwards compatibility with old v1 clients which send spec.host case "spec.host": diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/register.go index c79bee8a8b6..36856ef6cc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -39,8 +39,12 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder object to register various 
known types SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + + // AddToScheme represents a func that can be used to apply all the registered + // funcs in a scheme + AddToScheme = SchemeBuilder.AddToScheme ) func addKnownTypes(scheme *runtime.Scheme) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go index 1367e00e56e..fe199d3fa0c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -20,35 +20,37 @@ import ( "k8s.io/apimachinery/pkg/api/resource" ) -func (self ResourceName) String() string { - return string(self) +func (rn ResourceName) String() string { + return string(rn) } -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { +// CPU returns the CPU limit if specified. +func (rl *ResourceList) CPU() *resource.Quantity { + if val, ok := (*rl)[ResourceCPU]; ok { return &val } return &resource.Quantity{Format: resource.DecimalSI} } -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { +// Memory returns the Memory limit if specified. +func (rl *ResourceList) Memory() *resource.Quantity { + if val, ok := (*rl)[ResourceMemory]; ok { return &val } return &resource.Quantity{Format: resource.BinarySI} } -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { +// Pods returns the list of pods +func (rl *ResourceList) Pods() *resource.Quantity { + if val, ok := (*rl)[ResourcePods]; ok { return &val } return &resource.Quantity{} } -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { +// StorageEphemeral returns the list of ephemeral storage volumes, if any +func (rl *ResourceList) StorageEphemeral() *resource.Quantity { + if val, ok := (*rl)[ResourceEphemeralStorage]; ok { return &val } return &resource.Quantity{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go index 4dea6ad6147..2c800de9b01 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -27,7 +27,7 @@ func (t *Taint) MatchTaint(taintToMatch Taint) bool { return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect } -// taint.ToString() converts taint struct to string in format '=:', '=:', ':', or ''. +// ToString converts taint struct to string in format '=:', '=:', ':', or ''. func (t *Taint) ToString() string { if len(t.Effect) == 0 { if len(t.Value) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 562b632fdff..74d22ae973e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -159,7 +159,7 @@ type VolumeSource struct { CSI *CSIVolumeSource } -// Similar to VolumeSource but meant for the administrator who creates PVs. +// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. // Exactly one of its members must be set. 
type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a @@ -237,6 +237,7 @@ type PersistentVolumeSource struct { CSI *CSIPersistentVolumeSource } +// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume ClaimName string @@ -257,6 +258,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PersistentVolume struct captures the details of the implementation of PV storage type PersistentVolume struct { metav1.TypeMeta // +optional @@ -271,6 +273,7 @@ type PersistentVolume struct { Status PersistentVolumeStatus } +// PersistentVolumeSpec has most of the details required to define a persistent volume type PersistentVolumeSpec struct { // Resources represents the actual resources of the volume Capacity ResourceList @@ -340,6 +343,7 @@ const ( PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" ) +// PersistentVolumeStatus represents the status of PV storage type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim // +optional @@ -354,6 +358,7 @@ type PersistentVolumeStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PersistentVolumeList represents a list of PVs type PersistentVolumeList struct { metav1.TypeMeta // +optional @@ -380,6 +385,7 @@ type PersistentVolumeClaim struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PersistentVolumeClaimList represents the list of PV claims type PersistentVolumeClaimList struct { metav1.TypeMeta // +optional @@ -428,6 +434,8 @@ type PersistentVolumeClaimSpec struct { DataSource *TypedLocalObjectReference } +// PersistentVolumeClaimConditionType defines the condition of PV claim. +// Valid values are either "Resizing" or "FileSystemResizePending". type PersistentVolumeClaimConditionType string // These are valid conditions of Pvc @@ -438,6 +446,7 @@ const ( PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" ) +// PersistentVolumeClaimCondition represents the current condition of PV claim type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType Status ConditionStatus @@ -451,6 +460,7 @@ type PersistentVolumeClaimCondition struct { Message string } +// PersistentVolumeClaimStatus represents the status of PV claim type PersistentVolumeClaimStatus struct { // Phase represents the current phase of PersistentVolumeClaim // +optional @@ -465,8 +475,10 @@ type PersistentVolumeClaimStatus struct { Conditions []PersistentVolumeClaimCondition } +// PersistentVolumeAccessMode defines various access modes for PV. 
type PersistentVolumeAccessMode string +// These are the valid values for PersistentVolumeAccessMode const ( // can be mounted read/write mode to exactly 1 host ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" @@ -476,8 +488,10 @@ const ( ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" ) +// PersistentVolumePhase defines the phase in which a PV is type PersistentVolumePhase string +// These are the valid values for PersistentVolumePhase const ( // used for PersistentVolumes that are not available VolumePending PersistentVolumePhase = "Pending" @@ -494,8 +508,10 @@ const ( VolumeFailed PersistentVolumePhase = "Failed" ) +// PersistentVolumeClaimPhase defines the phase of PV claim type PersistentVolumeClaimPhase string +// These are the valid value for PersistentVolumeClaimPhase const ( // used for PersistentVolumeClaims that are not yet bound ClaimPending PersistentVolumeClaimPhase = "Pending" @@ -507,8 +523,10 @@ const ( ClaimLost PersistentVolumeClaimPhase = "Lost" ) +// HostPathType defines the type of host path for PV type HostPathType string +// These are the valid values for HostPathType const ( // For backwards compatible, leave it empty if unset HostPathUnset HostPathType = "" @@ -530,7 +548,7 @@ const ( HostPathBlockDev HostPathType = "BlockDevice" ) -// Represents a host path mapped into a pod. +// HostPathVolumeSource represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { // If the path is a symlink, it will follow the link to the real path. @@ -539,7 +557,7 @@ type HostPathVolumeSource struct { Type *HostPathType } -// Represents an empty directory for a pod. +// EmptyDirVolumeSource represents an empty directory for a pod. // Empty directory volumes support ownership management and SELinux relabeling. type EmptyDirVolumeSource struct { // TODO: Longer term we want to represent the selection of underlying @@ -563,6 +581,7 @@ type EmptyDirVolumeSource struct { // StorageMedium defines ways that storage can be allocated to a volume. type StorageMedium string +// These are the valid value for StorageMedium const ( StorageMediumDefault StorageMedium = "" // use whatever the default is for the node StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) @@ -581,7 +600,7 @@ const ( ProtocolSCTP Protocol = "SCTP" ) -// Represents a Persistent Disk resource in Google Compute Engine. +// GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine. // // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD @@ -607,7 +626,7 @@ type GCEPersistentDiskVolumeSource struct { ReadOnly bool } -// Represents an ISCSI disk. +// ISCSIVolumeSource represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { @@ -703,7 +722,7 @@ type ISCSIPersistentVolumeSource struct { InitiatorName *string } -// Represents a Fibre Channel volume. +// FCVolumeSource represents a Fibre Channel volume. // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. 
type FCVolumeSource struct { @@ -755,7 +774,7 @@ type FlexPersistentVolumeSource struct { Options map[string]string } -// FlexVolume represents a generic volume resource that is +// FlexVolumeSource represents a generic volume resource that is // provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. @@ -781,7 +800,7 @@ type FlexVolumeSource struct { Options map[string]string } -// Represents a Persistent Disk resource in AWS. +// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS. // // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk @@ -807,7 +826,7 @@ type AWSElasticBlockStoreVolumeSource struct { ReadOnly bool } -// Represents a volume that is populated with the contents of a git repository. +// GitRepoVolumeSource represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. // @@ -829,7 +848,7 @@ type GitRepoVolumeSource struct { // TODO: Consider credentials here. } -// Adapts a Secret into a volume. +// SecretVolumeSource adapts a Secret into a volume. // // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. @@ -859,7 +878,7 @@ type SecretVolumeSource struct { Optional *bool } -// Adapts a secret into a projected volume. +// SecretProjection adapts a secret into a projected volume. // // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. @@ -881,7 +900,7 @@ type SecretProjection struct { Optional *bool } -// Represents an NFS mount that lasts the lifetime of a pod. +// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server @@ -896,7 +915,7 @@ type NFSVolumeSource struct { ReadOnly bool } -// Represents a Quobyte mount that lasts the lifetime of a pod. +// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod. // Quobyte volumes do not support ownership management or SELinux relabeling. type QuobyteVolumeSource struct { // Registry represents a single or multiple Quobyte Registry services @@ -928,7 +947,7 @@ type QuobyteVolumeSource struct { Tenant string } -// Represents a Glusterfs mount that lasts the lifetime of a pod. +// GlusterfsVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // Required: EndpointsName is the endpoint name that details Glusterfs topology @@ -943,7 +962,7 @@ type GlusterfsVolumeSource struct { ReadOnly bool } -// Represents a Glusterfs mount that lasts the lifetime of a pod. +// GlusterfsPersistentVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsPersistentVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. 
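The volume-source comments renamed above (HostPathVolumeSource, EmptyDirVolumeSource, GlusterfsVolumeSource, and the rest) only change doc strings, but the HostPathType constants they sit next to are easiest to read with a concrete value. A small sketch under the same assumption as earlier (the vendored internal core package is importable); hostPathVolume is a hypothetical helper name, and real callers would normally use the public k8s.io/api/core/v1 equivalents.

package volumes // hypothetical package, for illustration only

import core "k8s.io/kubernetes/pkg/apis/core"

// hostPathVolume builds a pod volume backed by a directory on the node,
// using one of the HostPathType constants shown in the hunk above.
func hostPathVolume(name, path string) core.Volume {
	// DirectoryOrCreate asks the kubelet to create the directory if it
	// does not already exist on the node.
	t := core.HostPathDirectoryOrCreate
	return core.Volume{
		Name: name,
		VolumeSource: core.VolumeSource{
			HostPath: &core.HostPathVolumeSource{
				Path: path,
				Type: &t,
			},
		},
	}
}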
@@ -967,7 +986,7 @@ type GlusterfsPersistentVolumeSource struct { EndpointsNamespace *string } -// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBDVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // Required: CephMonitors is a collection of Ceph monitors @@ -998,7 +1017,7 @@ type RBDVolumeSource struct { ReadOnly bool } -// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBDPersistentVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // Required: CephMonitors is a collection of Ceph monitors @@ -1029,7 +1048,7 @@ type RBDPersistentVolumeSource struct { ReadOnly bool } -// Represents a cinder volume resource in Openstack. A Cinder volume +// CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume // must exist before mounting to a container. The volume must also be // in the same region as the kubelet. Cinder volumes support ownership // management and SELinux relabeling. @@ -1051,7 +1070,7 @@ type CinderVolumeSource struct { SecretRef *LocalObjectReference } -// Represents a cinder volume resource in Openstack. A Cinder volume +// CinderPersistentVolumeSource represents a cinder volume resource in Openstack. A Cinder volume // must exist before mounting to a container. The volume must also be // in the same region as the kubelet. Cinder volumes support ownership // management and SELinux relabeling. @@ -1073,7 +1092,7 @@ type CinderPersistentVolumeSource struct { SecretRef *SecretReference } -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// CephFSVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors @@ -1107,7 +1126,7 @@ type SecretReference struct { Namespace string } -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// CephFSPersistentVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors @@ -1130,7 +1149,7 @@ type CephFSPersistentVolumeSource struct { ReadOnly bool } -// Represents a Flocker volume mounted by the Flocker agent. +// FlockerVolumeSource represents a Flocker volume mounted by the Flocker agent. // One and only one of datasetName and datasetUUID should be set. // Flocker volumes do not support ownership management or SELinux relabeling. type FlockerVolumeSource struct { @@ -1143,7 +1162,7 @@ type FlockerVolumeSource struct { DatasetUUID string } -// Represents a volume containing downward API info. +// DownwardAPIVolumeSource represents a volume containing downward API info. // Downward API volumes support ownership management and SELinux relabeling. 
type DownwardAPIVolumeSource struct { // Items is a list of DownwardAPIVolume file @@ -1158,7 +1177,7 @@ type DownwardAPIVolumeSource struct { DefaultMode *int32 } -// Represents a single file containing information from the downward API +// DownwardAPIVolumeFile represents a single file containing information from the downward API type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' Path string @@ -1177,7 +1196,7 @@ type DownwardAPIVolumeFile struct { Mode *int32 } -// Represents downward API info for projecting into a projected volume. +// DownwardAPIProjection represents downward API info for projecting into a projected volume. // Note that this is identical to a downwardAPI volume source without the default // mode. type DownwardAPIProjection struct { @@ -1186,7 +1205,7 @@ type DownwardAPIProjection struct { Items []DownwardAPIVolumeFile } -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +// AzureFileVolumeSource azureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key SecretName string @@ -1198,7 +1217,7 @@ type AzureFileVolumeSource struct { ReadOnly bool } -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod. type AzureFilePersistentVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key SecretName string @@ -1214,7 +1233,7 @@ type AzureFilePersistentVolumeSource struct { SecretNamespace *string } -// Represents a vSphere volume resource. +// VsphereVirtualDiskVolumeSource represents a vSphere volume resource. type VsphereVirtualDiskVolumeSource struct { // Path that identifies vSphere volume vmdk VolumePath string @@ -1231,7 +1250,7 @@ type VsphereVirtualDiskVolumeSource struct { StoragePolicyID string } -// Represents a Photon Controller persistent disk resource. +// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource. type PhotonPersistentDiskVolumeSource struct { // ID that identifies Photon Controller persistent disk PdID string @@ -1256,9 +1275,13 @@ type PortworxVolumeSource struct { ReadOnly bool } +// AzureDataDiskCachingMode defines the caching mode for Azure data disk type AzureDataDiskCachingMode string + +// AzureDataDiskKind defines the kind of Azure data disk type AzureDataDiskKind string +// Defines cache mode and kinds for Azure data disk const ( AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" @@ -1269,7 +1292,7 @@ const ( AzureManagedDisk AzureDataDiskKind = "Managed" ) -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod. type AzureDiskVolumeSource struct { // The Name of the data disk in the blob storage DiskName string @@ -1366,7 +1389,7 @@ type ScaleIOPersistentVolumeSource struct { ReadOnly bool } -// Represents a StorageOS persistent volume resource. +// StorageOSVolumeSource represents a StorageOS persistent volume resource. 
type StorageOSVolumeSource struct { // VolumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. @@ -1394,7 +1417,7 @@ type StorageOSVolumeSource struct { SecretRef *LocalObjectReference } -// Represents a StorageOS persistent volume resource. +// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource. type StorageOSPersistentVolumeSource struct { // VolumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. @@ -1422,7 +1445,7 @@ type StorageOSPersistentVolumeSource struct { SecretRef *ObjectReference } -// Adapts a ConfigMap into a volume. +// ConfigMapVolumeSource adapts a ConfigMap into a volume. // // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless @@ -1451,7 +1474,7 @@ type ConfigMapVolumeSource struct { Optional *bool } -// Adapts a ConfigMap into a projected volume. +// ConfigMapProjection adapts a ConfigMap into a projected volume. // // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, @@ -1496,7 +1519,7 @@ type ServiceAccountTokenProjection struct { Path string } -// Represents a projected volume source +// ProjectedVolumeSource represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections Sources []VolumeProjection @@ -1509,7 +1532,7 @@ type ProjectedVolumeSource struct { DefaultMode *int32 } -// Projection that may be projected along with other supported volume types +// VolumeProjection that may be projected along with other supported volume types type VolumeProjection struct { // all types below are the supported types for projection into the same volume @@ -1523,7 +1546,7 @@ type VolumeProjection struct { ServiceAccountToken *ServiceAccountTokenProjection } -// Maps a string key to a path within a volume. +// KeyToPath maps a string key to a path within a volume. type KeyToPath struct { // The key to project. Key string @@ -1541,7 +1564,7 @@ type KeyToPath struct { Mode *int32 } -// Local represents directly-attached storage with node affinity (Beta feature) +// LocalVolumeSource represents directly-attached storage with node affinity (Beta feature) type LocalVolumeSource struct { // The full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -1555,7 +1578,7 @@ type LocalVolumeSource struct { FSType *string } -// Represents storage that is managed by an external CSI volume driver. +// CSIPersistentVolumeSource represents storage that is managed by an external CSI volume driver. type CSIPersistentVolumeSource struct { // Driver is the name of the driver to use for this volume. // Required. @@ -1615,7 +1638,7 @@ type CSIPersistentVolumeSource struct { ControllerExpandSecretRef *SecretReference } -// Represents a source location of a volume to mount, managed by an external CSI driver +// CSIVolumeSource represents a source location of a volume to mount, managed by an external CSI driver type CSIVolumeSource struct { // Driver is the name of the CSI driver that handles this volume. // Consult with your admin for the correct name as registered in the cluster. @@ -1692,7 +1715,6 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
// Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is beta in 1.15. // +optional SubPathExpr string } @@ -1753,7 +1775,7 @@ type EnvVar struct { // Only one of its fields may be set. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. // +optional FieldRef *ObjectFieldSelector // Selects a resource of the container: only resources limits and requests @@ -1790,7 +1812,7 @@ type ResourceFieldSelector struct { Divisor resource.Quantity } -// Selects a key from a ConfigMap. +// ConfigMapKeySelector selects a key from a ConfigMap. type ConfigMapKeySelector struct { // The ConfigMap to select from. LocalObjectReference @@ -2106,6 +2128,7 @@ type Lifecycle struct { // The below types are used by kube_client and api_server. +// ConditionStatus defines conditions of resources type ConditionStatus string // These are valid condition statuses. "ConditionTrue" means a resource is in the condition; @@ -2118,6 +2141,7 @@ const ( ConditionUnknown ConditionStatus = "Unknown" ) +// ContainerStateWaiting represents the waiting state of a container type ContainerStateWaiting struct { // A brief CamelCase string indicating details about why the container is in waiting state. // +optional @@ -2127,11 +2151,13 @@ type ContainerStateWaiting struct { Message string } +// ContainerStateRunning represents the running state of a container type ContainerStateRunning struct { // +optional StartedAt metav1.Time } +// ContainerStateTerminated represents the terminated state of a container type ContainerStateTerminated struct { ExitCode int32 // +optional @@ -2160,6 +2186,7 @@ type ContainerState struct { Terminated *ContainerStateTerminated } +// ContainerStatus represents the status of a container type ContainerStatus struct { // Each container in a pod must have a unique name. Name string @@ -2202,6 +2229,7 @@ const ( PodUnknown PodPhase = "Unknown" ) +// PodConditionType defines the condition of pod type PodConditionType string // These are valid conditions of pod. @@ -2220,6 +2248,7 @@ const ( ContainersReady PodConditionType = "ContainersReady" ) +// PodCondition represents pod's condition type PodCondition struct { Type PodConditionType Status ConditionStatus @@ -2239,6 +2268,7 @@ type PodCondition struct { // is RestartPolicyAlways. type RestartPolicy string +// These are valid restart policies const ( RestartPolicyAlways RestartPolicy = "Always" RestartPolicyOnFailure RestartPolicy = "OnFailure" @@ -2280,7 +2310,7 @@ const ( DNSNone DNSPolicy = "None" ) -// A node selector represents the union of the results of one or more label queries +// NodeSelector represents the union of the results of one or more label queries // over a set of nodes; that is, it represents the OR of the selectors represented // by the node selector terms. type NodeSelector struct { @@ -2288,6 +2318,7 @@ type NodeSelector struct { NodeSelectorTerms []NodeSelectorTerm } +// NodeSelectorTerm represents expressions and fields required to select nodes. // A null or empty node selector term matches no objects. The requirements of // them are ANDed. // The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. 
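The EnvVarSource comment change above adds status.podIPs to the documented downward-API field paths, matching the ConvertDownwardAPIFieldLabel and field-label conversion updates elsewhere in this patch. A hedged sketch of what that looks like from the public v1 API, assuming a cluster built from a Kubernetes release that actually serves status.podIPs:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Expose the pod's IP addresses to the container through the downward API.
	env := v1.EnvVar{
		Name: "POD_IPS",
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.podIPs"},
		},
	}
	fmt.Println(env.Name, env.ValueFrom.FieldRef.FieldPath)
}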
@@ -2298,7 +2329,7 @@ type NodeSelectorTerm struct { MatchFields []NodeSelectorRequirement } -// A node selector requirement is a selector that contains values, a key, and an operator +// NodeSelectorRequirement is a selector that contains values, a key, and an operator // that relates the key and values. type NodeSelectorRequirement struct { // The label key that the selector applies to. @@ -2315,10 +2346,11 @@ type NodeSelectorRequirement struct { Values []string } -// A node selector operator is the set of operators that can be used in +// NodeSelectorOperator is the set of operators that can be used in // a node selector requirement. type NodeSelectorOperator string +// These are valid values of NodeSelectorOperator const ( NodeSelectorOpIn NodeSelectorOperator = "In" NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" @@ -2328,7 +2360,7 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) -// A topology selector term represents the result of label queries. +// TopologySelectorTerm represents the result of label queries. // A null or empty topology selector term matches no objects. // The requirements of them are ANDed. // It provides a subset of functionality as NodeSelectorTerm. @@ -2339,7 +2371,7 @@ type TopologySelectorTerm struct { MatchLabelExpressions []TopologySelectorLabelRequirement } -// A topology selector requirement is a selector that matches given label. +// TopologySelectorLabelRequirement is a selector that matches given label. // This is an alpha feature and may change in the future. type TopologySelectorLabelRequirement struct { // The label key that the selector applies to. @@ -2362,7 +2394,7 @@ type Affinity struct { PodAntiAffinity *PodAntiAffinity } -// Pod affinity is a group of inter pod affinity scheduling rules. +// PodAffinity is a group of inter pod affinity scheduling rules. type PodAffinity struct { // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. // If the affinity requirements specified by this field are not met at @@ -2397,7 +2429,7 @@ type PodAffinity struct { PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm } -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +// PodAntiAffinity is a group of inter pod anti affinity scheduling rules. type PodAntiAffinity struct { // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. // If the anti-affinity requirements specified by this field are not met at @@ -2432,7 +2464,8 @@ type PodAntiAffinity struct { PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm } -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +// WeightedPodAffinityTerm represents the weights of all of the matched WeightedPodAffinityTerm +// fields are added per-node to find the most preferred node(s) type WeightedPodAffinityTerm struct { // weight associated with matching the corresponding podAffinityTerm, // in the range 1-100. 
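The affinity doc comments above (PodAffinity, PodAntiAffinity, WeightedPodAffinityTerm) describe how weighted terms are added up per node. A short illustrative sketch with the public v1 types, not taken from this patch: a soft anti-affinity that asks the scheduler to spread pods carrying the same app label across hostnames.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// softSpread returns a preferred (weighted) anti-affinity term; the scheduler
// adds the weight per node for every matching term and favours the nodes with
// the highest total.
func softSpread(appLabel string) *v1.PodAntiAffinity {
	return &v1.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
			Weight: 100, // must be in the range 1-100
			PodAffinityTerm: v1.PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": appLabel},
				},
				TopologyKey: "kubernetes.io/hostname",
			},
		}},
	}
}

func main() { _ = softSpread("web") }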
@@ -2441,7 +2474,7 @@ type WeightedPodAffinityTerm struct { PodAffinityTerm PodAffinityTerm } -// Defines a set of pods (namely those matching the labelSelector +// PodAffinityTerm defines a set of pods (namely those matching the labelSelector // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of @@ -2463,7 +2496,7 @@ type PodAffinityTerm struct { TopologyKey string } -// Node affinity is a group of node affinity scheduling rules. +// NodeAffinity is a group of node affinity scheduling rules. type NodeAffinity struct { // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. // If the affinity requirements specified by this field are not met at @@ -2494,7 +2527,7 @@ type NodeAffinity struct { PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm } -// An empty preferred scheduling term matches all objects with implicit weight 0 +// PreferredSchedulingTerm represents an empty preferred scheduling term matches all objects with implicit weight 0 // (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type PreferredSchedulingTerm struct { // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -2503,6 +2536,7 @@ type PreferredSchedulingTerm struct { Preference NodeSelectorTerm } +// Taint represents taint that can be applied to the node. // The node this Taint is attached to has the "effect" on // any pod that does not tolerate the Taint. type Taint struct { @@ -2521,8 +2555,10 @@ type Taint struct { TimeAdded *metav1.Time } +// TaintEffect defines the effects of Taint type TaintEffect string +// These are valid values for TaintEffect const ( // Do not allow new pods to schedule onto the node unless they tolerate the taint, // but allow all pods submitted to Kubelet without going through the scheduler @@ -2544,6 +2580,7 @@ const ( TaintEffectNoExecute TaintEffect = "NoExecute" ) +// Toleration represents the toleration object that can be attached to a pod. // The pod this Toleration is attached to tolerates any taint that matches // the triple using the matching operator . type Toleration struct { @@ -2573,9 +2610,10 @@ type Toleration struct { TolerationSeconds *int64 } -// A toleration operator is the set of operators that can be used in a toleration. +// TolerationOperator is the set of operators that can be used in a toleration. type TolerationOperator string +// These are valid values for TolerationOperator const ( TolerationOpExists TolerationOperator = "Exists" TolerationOpEqual TolerationOperator = "Equal" @@ -2774,7 +2812,6 @@ type PodSecurityContext struct { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional ShareProcessNamespace *bool @@ -2838,6 +2875,7 @@ type PodSecurityContext struct { // PodQOSClass defines the supported qos classes of Pods. type PodQOSClass string +// These are valid values for PodQOSClass const ( // PodQOSGuaranteed is the Guaranteed qos class. PodQOSGuaranteed PodQOSClass = "Guaranteed" @@ -2876,6 +2914,7 @@ type PodDNSConfigOption struct { Value *string } +// PodIP represents the IP address of a pod. // IP address information. 
Each entry includes: // IP: An IP address allocated to the pod. Routable at least within // the cluster. @@ -3170,6 +3209,7 @@ type ReplicationControllerStatus struct { Conditions []ReplicationControllerCondition } +// ReplicationControllerConditionType defines the conditions of a replication controller. type ReplicationControllerConditionType string // These are valid conditions of a replication controller. @@ -3243,7 +3283,7 @@ type ServiceList struct { Items []Service } -// Session Affinity Type string +// ServiceAffinity Type string type ServiceAffinity string const ( @@ -3279,7 +3319,7 @@ type ClientIPConfig struct { TimeoutSeconds *int32 } -// Service Type string describes ingress methods for a service +// ServiceType string describes ingress methods for a service type ServiceType string const ( @@ -3302,7 +3342,7 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// Service External Traffic Policy Type string +// ServiceExternalTrafficPolicyType string type ServiceExternalTrafficPolicyType string const ( @@ -3351,6 +3391,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 ) // ServiceSpec describes the attributes that a user creates on a service @@ -3463,8 +3505,24 @@ type ServiceSpec struct { // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. // +optional IPFamily *IPFamily + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + TopologyKeys []string } +// ServicePort represents the port on which the service is exposed type ServicePort struct { // Optional if only one ServicePort is defined on this service: The // name of this port within the service. This must be a DNS_LABEL. @@ -3657,7 +3715,7 @@ type NodeSpec struct { // Deprecated. Not all kubelets will set this field. Remove field after 1.13. // see: https://issues.k8s.io/61966 // +optional - DoNotUse_ExternalID string + DoNotUseExternalID string } // NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. @@ -3665,6 +3723,7 @@ type NodeConfigSource struct { ConfigMap *ConfigMapNodeConfigSource } +// ConfigMapNodeConfigSource represents the config map of a node type ConfigMapNodeConfigSource struct { // Namespace is the metadata.namespace of the referenced ConfigMap. // This field is required in all cases. 
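The ServiceSpec hunk above introduces topologyKeys together with the MaxServiceTopologyKeys limit. A minimal sketch of how a caller of the internal type might populate and bound-check the new field; ServiceTopology was an alpha feature at this point and has since been superseded upstream, so treat this purely as an illustration of the fields shown in the diff.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	spec := core.ServiceSpec{
		Selector: map[string]string{"app": "web"},
		// Preference-ordered topology keys; "*" is the catch-all and only
		// makes sense as the last entry.
		TopologyKeys: []string{
			"kubernetes.io/hostname",
			"topology.kubernetes.io/zone",
			"*",
		},
	}
	if len(spec.TopologyKeys) > core.MaxServiceTopologyKeys {
		fmt.Println("too many topology keys; at most", core.MaxServiceTopologyKeys, "are allowed")
	}
}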
@@ -3820,6 +3879,7 @@ type NodeStatus struct { Config *NodeConfigStatus } +// UniqueVolumeName defines the name of attached volume type UniqueVolumeName string // AttachedVolume describes a volume attached to a node @@ -3841,7 +3901,7 @@ type AvoidPods struct { PreferAvoidPods []PreferAvoidPodsEntry } -// Describes a class of pods that should avoid this node. +// PreferAvoidPodsEntry describes a class of pods that should avoid this node. type PreferAvoidPodsEntry struct { // The class of pods. PodSignature PodSignature @@ -3856,7 +3916,7 @@ type PreferAvoidPodsEntry struct { Message string } -// Describes the class of pods that should avoid this node. +// PodSignature describes the class of pods that should avoid this node. // Exactly one field should be set. type PodSignature struct { // Reference to controller whose pods should avoid this node. @@ -3864,7 +3924,7 @@ type PodSignature struct { PodController *metav1.OwnerReference } -// Describe a container image +// ContainerImage describe a container image type ContainerImage struct { // Names by which this image is known. Names []string @@ -3873,6 +3933,7 @@ type ContainerImage struct { SizeBytes int64 } +// NodePhase defines the phase in which a node is in type NodePhase string // These are the valid phases of node. @@ -3885,6 +3946,7 @@ const ( NodeTerminated NodePhase = "Terminated" ) +// NodeConditionType defines node's condition type NodeConditionType string // These are valid conditions of node. Currently, we don't have enough information to decide @@ -3901,6 +3963,7 @@ const ( NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" ) +// NodeCondition represents the node's condition type NodeCondition struct { Type NodeConditionType Status ConditionStatus @@ -3914,8 +3977,10 @@ type NodeCondition struct { Message string } +// NodeAddressType defines the node's address type type NodeAddressType string +// These are valid values of node address type const ( NodeHostName NodeAddressType = "Hostname" NodeExternalIP NodeAddressType = "ExternalIP" @@ -3924,6 +3989,7 @@ const ( NodeInternalDNS NodeAddressType = "InternalDNS" ) +// NodeAddress represents node's address type NodeAddress struct { Type NodeAddressType Address string @@ -3958,11 +4024,11 @@ const ( ) const ( - // Default namespace prefix. + // ResourceDefaultNamespacePrefix is the default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). + // ResourceHugePagesPrefix is the name prefix for huge page resources (alpha). ResourceHugePagesPrefix = "hugepages-" - // Name prefix for storage resource limits + // ResourceAttachableVolumesPrefix is the name prefix for storage resource limits ResourceAttachableVolumesPrefix = "attachable-volumes-" ) @@ -4022,6 +4088,7 @@ type NamespaceStatus struct { Conditions []NamespaceCondition } +// NamespacePhase defines the phase in which the namespace is type NamespacePhase string // These are the valid phases of a namespace. @@ -4058,7 +4125,7 @@ type NamespaceCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// A namespace provides a scope for Names. +// Namespace provides a scope for Names. // Use of multiple namespaces is optional type Namespace struct { metav1.TypeMeta @@ -4101,7 +4168,7 @@ type Binding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. 
+// EphemeralContainers is a list of ephemeral containers used with the Pod ephemeralcontainers subresource. type EphemeralContainers struct { metav1.TypeMeta // +optional @@ -4152,6 +4219,15 @@ type PodLogOptions struct { // log output. This may not display a complete final line of logging, and may return // slightly more or slightly less than the specified limit. LimitBytes *int64 + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + InsecureSkipTLSVerifyBackend bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4301,12 +4377,14 @@ type TypedLocalObjectReference struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// SerializedReference represents a serialized object reference type SerializedReference struct { metav1.TypeMeta // +optional Reference ObjectReference } +// EventSource represents the source from which an event is generated type EventSource struct { // Component from which the event is generated. // +optional @@ -4394,6 +4472,7 @@ type Event struct { ReportingInstance string } +// EventSeries represents a series ov events type EventSeries struct { // Number of occurrences in this series up to the last heartbeat time Count int32 @@ -4404,8 +4483,10 @@ type EventSeries struct { State EventSeriesState } +// EventSeriesState defines the state of event series type EventSeriesState string +// These are valid values of event series state const ( EventSeriesStateOngoing EventSeriesState = "Ongoing" EventSeriesStateFinished EventSeriesState = "Finished" @@ -4428,15 +4509,15 @@ type EventList struct { // List holds a list of objects, which may not be known by the server. type List metainternalversion.List -// A type of object that is limited +// LimitType defines a type of object that is limited type LimitType string const ( - // Limit that applies to all pods in a namespace + // LimitTypePod defines limit that applies to all pods in a namespace LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace + // LimitTypeContainer defines limit that applies to all containers in a namespace LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace + // LimitTypePersistentVolumeClaim defines limit that applies to all persistent volume claims in a namespace LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" ) @@ -4538,9 +4619,10 @@ const ( DefaultResourceRequestsPrefix = "requests." 
) -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +// ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string +// These are valid values for resource quota spec const ( // Match all pod objects where spec.activeDeadlineSeconds ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" @@ -4569,7 +4651,7 @@ type ResourceQuotaSpec struct { ScopeSelector *ScopeSelector } -// A scope selector represents the AND of the selectors represented +// ScopeSelector represents the AND of the selectors represented // by the scoped-resource selector terms. type ScopeSelector struct { // A list of scope selector requirements by scope of the resources. @@ -4577,7 +4659,7 @@ type ScopeSelector struct { MatchExpressions []ScopedResourceSelectorRequirement } -// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// ScopedResourceSelectorRequirement is a selector that contains values, a scope name, and an operator // that relates the scope name and values. type ScopedResourceSelectorRequirement struct { // The name of the scope that the selector applies to. @@ -4593,10 +4675,11 @@ type ScopedResourceSelectorRequirement struct { Values []string } -// A scope selector operator is the set of operators that can be used in +// ScopeSelectorOperator is the set of operators that can be used in // a scope selector requirement. type ScopeSelectorOperator string +// These are the valid values for ScopeSelectorOperator const ( ScopeSelectorOpIn ScopeSelectorOperator = "In" ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" @@ -4664,10 +4747,13 @@ type Secret struct { Type SecretType } +// MaxSecretSize represents the max secret size. const MaxSecretSize = 1 * 1024 * 1024 +// SecretType defines the types of secrets type SecretType string +// These are the valid values for SecretType const ( // SecretTypeOpaque is the default; arbitrary user-defined data SecretTypeOpaque SecretType = "Opaque" @@ -4702,14 +4788,14 @@ const ( // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets DockerConfigKey = ".dockercfg" - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // SecretTypeDockerConfigJSON contains a dockercfg file that follows the same format rules as ~/.docker/config.json // // Required fields: // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + SecretTypeDockerConfigJSON SecretType = "kubernetes.io/dockerconfigjson" - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" + // DockerConfigJSONKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJSONKey = ".dockerconfigjson" // SecretTypeBasicAuth contains data needed for basic authentication. // @@ -4754,6 +4840,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// SecretList represents the list of secrets type SecretList struct { metav1.TypeMeta // +optional @@ -4842,7 +4929,7 @@ const ( PortForwardRequestIDHeader = "requestID" ) -// Type and constants for component health validation. +// ComponentConditionType defines type and constants for component health validation. type ComponentConditionType string // These are the valid conditions for the component. 
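The PodLogOptions hunk earlier in this types.go diff adds insecureSkipTLSVerifyBackend, and the url.Values conversions later in this patch carry it through to the v1 API. A hedged sketch of how a client-go caller could set the public v1 field; the kubeconfig handling and the exact request helpers vary by client-go version, so treat the wiring here as an assumption rather than the canonical usage.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the apiserver not to verify the kubelet's serving certificate when
	// it proxies the log stream, which is exactly the trade-off spelled out
	// in the field's doc comment above.
	opts := &v1.PodLogOptions{InsecureSkipTLSVerifyBackend: true}
	req := client.CoreV1().Pods("default").GetLogs("my-pod", opts)
	_ = req // stream or execute the request as usual for your client-go version
}

Because the option disables certificate verification between the apiserver and the kubelet, it trades log authenticity for availability and is intended for clusters where the kubelet's serving certificate cannot be verified.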
@@ -4850,6 +4937,7 @@ const ( ComponentHealthy ComponentConditionType = "Healthy" ) +// ComponentCondition represents the condition of a component type ComponentCondition struct { Type ComponentConditionType Status ConditionStatus @@ -4873,6 +4961,7 @@ type ComponentStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// ComponentStatusList represents the list of component statuses type ComponentStatusList struct { metav1.TypeMeta // +optional @@ -4941,6 +5030,7 @@ type SecurityContext struct { ProcMount *ProcMountType } +// ProcMountType defines the type of proc mount type ProcMountType string const ( @@ -4989,7 +5079,7 @@ type WindowsSecurityContextOptions struct { // Defaults to the user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. // +optional RunAsUserName *string } @@ -5019,9 +5109,11 @@ type RangeAllocation struct { } const ( - // "default-scheduler" is the name of default scheduler. + // DefaultSchedulerName defines the name of default scheduler. DefaultSchedulerName = "default-scheduler" + // DefaultHardPodAffinitySymmetricWeight is the weight of implicit PreferredDuringScheduling affinity rule. + // // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, @@ -5029,6 +5121,8 @@ const ( DefaultHardPodAffinitySymmetricWeight int32 = 1 ) +// UnsatisfiableConstraintAction defines the actions that can be taken for an +// unsatisfiable constraint. 
type UnsatisfiableConstraintAction string const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS index 02188d9a2f8..68232a5525c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS @@ -36,4 +36,3 @@ reviewers: - krousey - jayunit100 - rootfs -- markturansky diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index 8a5c1c9cb9f..2c78594789f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -31,35 +31,8 @@ import ( ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_core_Pod_To_v1_Pod, - Convert_core_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_core_PodStatus, - Convert_core_PodStatus_To_v1_PodStatus, - Convert_core_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeSpec_To_core_NodeSpec, - Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_core_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_core_Pod, - Convert_v1_PodSpec_To_core_PodSpec, - Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, - Convert_v1_Secret_To_core_Secret, - Convert_v1_ServiceSpec_To_core_ServiceSpec, - Convert_v1_ResourceList_To_core_ResourceList, - Convert_v1_ReplicationController_To_apps_ReplicaSet, - Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec, - Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus, - Convert_apps_ReplicaSet_To_v1_ReplicationController, - Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec, - Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus, - ) - if err != nil { - return err - } - // Add field conversion funcs. - err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"), + err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"), func(label, value string) (string, string, error) { switch label { case "metadata.name", @@ -70,6 +43,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { "spec.serviceAccountName", "status.phase", "status.podIP", + "status.podIPs", "status.nominatedNodeName": return label, value, nil // This is for backwards compatibility with old v1 clients which send spec.host diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index e1dff4fdcf3..7a573a5227c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -135,16 +135,6 @@ func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { return true } -// TODO: make method on LoadBalancerStatus? 
-func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus { - c := &v1.LoadBalancerStatus{} - c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - // GetAccessModesAsString returns a string representation of an array of access modes. // modes, when present, are always in the same order: RWO,ROX,RWX. func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index 8ad0b84c779..17dabb3983a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -21,6 +21,7 @@ limitations under the License. package v1 import ( + url "net/url" unsafe "unsafe" v1 "k8s.io/api/core/v1" @@ -1040,16 +1041,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*v1.NodeSpec), b.(*core.NodeSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.NodeSpec)(nil), (*v1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*v1.NodeSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.NodeStatus)(nil), (*core.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_NodeStatus_To_core_NodeStatus(a.(*v1.NodeStatus), b.(*core.NodeStatus), scope) }); err != nil { @@ -1180,16 +1171,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeSpec)(nil), (*core.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(a.(*v1.PersistentVolumeSpec), b.(*core.PersistentVolumeSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSpec)(nil), (*v1.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(a.(*core.PersistentVolumeSpec), b.(*v1.PersistentVolumeSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeStatus)(nil), (*core.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(a.(*v1.PersistentVolumeStatus), b.(*core.PersistentVolumeStatus), scope) }); err != nil { @@ -1210,16 +1191,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Pod_To_core_Pod(a.(*v1.Pod), b.(*core.Pod), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.Pod)(nil), (*v1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_core_Pod_To_v1_Pod(a.(*core.Pod), b.(*v1.Pod), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.PodAffinity)(nil), (*core.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PodAffinity_To_core_PodAffinity(a.(*v1.PodAffinity), b.(*core.PodAffinity), scope) }); err != nil { @@ -1380,26 +1351,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSpec_To_core_PodSpec(a.(*v1.PodSpec), b.(*core.PodSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.PodSpec)(nil), (*v1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*v1.PodSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PodStatus)(nil), (*core.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodStatus_To_core_PodStatus(a.(*v1.PodStatus), b.(*core.PodStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.PodStatus)(nil), (*v1.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodStatus_To_v1_PodStatus(a.(*core.PodStatus), b.(*v1.PodStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.PodStatusResult)(nil), (*core.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PodStatusResult_To_core_PodStatusResult(a.(*v1.PodStatusResult), b.(*core.PodStatusResult), scope) }); err != nil { @@ -1430,16 +1381,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*v1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.PodTemplateSpec)(nil), (*v1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*v1.PodTemplateSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*v1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope) }); err != nil { @@ -1570,16 +1511,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*v1.ReplicationControllerSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerStatus)(nil), (*core.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(a.(*v1.ReplicationControllerStatus), b.(*core.ReplicationControllerStatus), scope) }); err != nil { @@ -1700,11 +1631,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Secret_To_core_Secret(a.(*v1.Secret), b.(*core.Secret), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*v1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*v1.Secret), scope) }); err != nil { @@ -2080,6 +2006,41 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_NodeProxyOptions(a.(*url.Values), b.(*v1.NodeProxyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodAttachOptions(a.(*url.Values), b.(*v1.PodAttachOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodExecOptions(a.(*url.Values), b.(*v1.PodExecOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodLogOptions(a.(*url.Values), b.(*v1.PodLogOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodPortForwardOptions(a.(*url.Values), b.(*v1.PodPortForwardOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodProxyOptions(a.(*url.Values), b.(*v1.PodProxyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ServiceProxyOptions(a.(*url.Values), b.(*v1.ServiceProxyOptions), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) }); err != nil { @@ -4743,6 +4704,24 @@ func 
Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOpti return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) } +func autoConvert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeProxyOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil { + return err + } + } else { + out.Path = "" + } + return nil +} + +// Convert_url_Values_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_NodeProxyOptions(in, out, s) +} + func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) return nil @@ -4836,7 +4815,7 @@ func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpe out.Unschedulable = in.Unschedulable out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints)) out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - out.DoNotUse_ExternalID = in.DoNotUse_ExternalID + out.DoNotUseExternalID = in.DoNotUseExternalID return nil } @@ -4846,7 +4825,7 @@ func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpe out.Unschedulable = in.Unschedulable out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - out.DoNotUse_ExternalID = in.DoNotUse_ExternalID + out.DoNotUseExternalID = in.DoNotUseExternalID return nil } @@ -5488,6 +5467,52 @@ func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOpti return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) } +func autoConvert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *v1.PodAttachOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdin, s); err != nil { + return err + } + } else { + out.Stdin = false + } + if values, ok := map[string][]string(*in)["stdout"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdout, s); err != nil { + return err + } + } else { + out.Stdout = false + } + if values, ok := map[string][]string(*in)["stderr"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stderr, s); err != nil { + return err + } + } else { + out.Stderr = false + } + if values, ok := map[string][]string(*in)["tty"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.TTY, s); err != nil { + return err + } + } else { + out.TTY = false + } + if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil { + return err + } + } else { + out.Container = "" + } + return nil +} + +// Convert_url_Values_To_v1_PodAttachOptions is an autogenerated conversion function. 
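// [Editor's note: illustrative sketch, not part of the generated file or of
// this patch.] The Convert_url_Values_To_v1_* helpers added in this hunk let
// request query strings be decoded directly into versioned option structs;
// in the apiserver this normally happens through a runtime.ParameterCodec
// built on a scheme that has these conversions registered. Using only the
// imports this generated file already has (net/url and v1 "k8s.io/api/core/v1"),
// the call shape is roughly the following; the helper name and query values
// are made up for illustration.
func exampleDecodePodAttachOptions() (*v1.PodAttachOptions, error) {
	query := url.Values{
		"container": {"app"},  // fills out.Container
		"stdin":     {"true"}, // fills out.Stdin
		"tty":       {"true"}, // fills out.TTY
	}
	opts := &v1.PodAttachOptions{}
	// The generated body only forwards the scope to primitive helpers that
	// ignore it, so a nil conversion.Scope is sufficient for this sketch.
	if err := Convert_url_Values_To_v1_PodAttachOptions(&query, opts, nil); err != nil {
		return nil, err
	}
	return opts, nil // keys absent from the query keep their zero values
}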
+func Convert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *v1.PodAttachOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PodAttachOptions(in, out, s) +} + func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { out.Type = core.PodConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) @@ -5594,6 +5619,57 @@ func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, o return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) } +func autoConvert_url_Values_To_v1_PodExecOptions(in *url.Values, out *v1.PodExecOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdin, s); err != nil { + return err + } + } else { + out.Stdin = false + } + if values, ok := map[string][]string(*in)["stdout"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdout, s); err != nil { + return err + } + } else { + out.Stdout = false + } + if values, ok := map[string][]string(*in)["stderr"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stderr, s); err != nil { + return err + } + } else { + out.Stderr = false + } + if values, ok := map[string][]string(*in)["tty"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.TTY, s); err != nil { + return err + } + } else { + out.TTY = false + } + if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil { + return err + } + } else { + out.Container = "" + } + if values, ok := map[string][]string(*in)["command"]; ok && len(values) > 0 { + out.Command = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.Command = nil + } + return nil +} + +// Convert_url_Values_To_v1_PodExecOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PodExecOptions(in *url.Values, out *v1.PodExecOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PodExecOptions(in, out, s) +} + func autoConvert_v1_PodIP_To_core_PodIP(in *v1.PodIP, out *core.PodIP, s conversion.Scope) error { out.IP = in.IP return nil @@ -5665,6 +5741,7 @@ func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, ou out.Timestamps = in.Timestamps out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend return nil } @@ -5682,6 +5759,7 @@ func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out.Timestamps = in.Timestamps out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend return nil } @@ -5690,6 +5768,80 @@ func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) } +func autoConvert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil { + return err + } + } else { + out.Container = "" + } + if values, ok := map[string][]string(*in)["follow"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Follow, s); err != nil { + return err + } + } else { + out.Follow = false + } + if values, ok := map[string][]string(*in)["previous"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Previous, s); err != nil { + return err + } + } else { + out.Previous = false + } + if values, ok := map[string][]string(*in)["sinceSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.SinceSeconds, s); err != nil { + return err + } + } else { + out.SinceSeconds = nil + } + if values, ok := map[string][]string(*in)["sinceTime"]; ok && len(values) > 0 { + if err := metav1.Convert_Slice_string_To_Pointer_v1_Time(&values, &out.SinceTime, s); err != nil { + return err + } + } else { + out.SinceTime = nil + } + if values, ok := map[string][]string(*in)["timestamps"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Timestamps, s); err != nil { + return err + } + } else { + out.Timestamps = false + } + if values, ok := map[string][]string(*in)["tailLines"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TailLines, s); err != nil { + return err + } + } else { + out.TailLines = nil + } + if values, ok := map[string][]string(*in)["limitBytes"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.LimitBytes, s); err != nil { + return err + } + } else { + out.LimitBytes = nil + } + if values, ok := map[string][]string(*in)["insecureSkipTLSVerifyBackend"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.InsecureSkipTLSVerifyBackend, s); err != nil { + return err + } + } else { + out.InsecureSkipTLSVerifyBackend = false + } + return nil +} + +// Convert_url_Values_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PodLogOptions(in, out, s) +} + func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) return nil @@ -5710,6 +5862,24 @@ func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.Pod return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) } +func autoConvert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *v1.PodPortForwardOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["ports"]; ok && len(values) > 0 { + if err := metav1.Convert_Slice_string_To_Slice_int32(&values, &out.Ports, s); err != nil { + return err + } + } else { + out.Ports = nil + } + return nil +} + +// Convert_url_Values_To_v1_PodPortForwardOptions is an autogenerated conversion function. 
+func Convert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *v1.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PodPortForwardOptions(in, out, s) +} + func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil @@ -5730,6 +5900,24 @@ func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) } +func autoConvert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *v1.PodProxyOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil { + return err + } + } else { + out.Path = "" + } + return nil +} + +// Convert_url_Values_To_v1_PodProxyOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *v1.PodProxyOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PodProxyOptions(in, out, s) +} + func autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in *v1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { out.ConditionType = core.PodConditionType(in.ConditionType) return nil @@ -7268,6 +7456,24 @@ func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.Service return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) } +func autoConvert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *v1.ServiceProxyOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil { + return err + } + } else { + out.Path = "" + } + return nil +} + +// Convert_url_Values_To_v1_ServiceProxyOptions is an autogenerated conversion function. 
+func Convert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *v1.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ServiceProxyOptions(in, out, s) +} + func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) @@ -7283,6 +7489,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor out.PublishNotReadyAddresses = in.PublishNotReadyAddresses out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) out.IPFamily = (*core.IPFamily)(unsafe.Pointer(in.IPFamily)) + out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys)) return nil } @@ -7306,6 +7513,7 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v out.HealthCheckNodePort = in.HealthCheckNodePort out.PublishNotReadyAddresses = in.PublishNotReadyAddresses out.IPFamily = (*v1.IPFamily)(unsafe.Pointer(in.IPFamily)) + out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys)) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS index 4a613d2546b..37f2de1a33a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS @@ -30,9 +30,7 @@ reviewers: - soltysh - piosz - jsafrane -- jbeda - dims - fejta - krousey - rootfs -- markturansky diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index d841ea3223b..4ad241c745b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -1910,9 +1910,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld storageValue, ok := spec.Resources.Requests[core.ResourceStorage] if !ok { allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), "")) + } else if errs := ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage))); len(errs) > 0 { + allErrs = append(allErrs, errs...) } else { allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) - allErrs = append(allErrs, ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) } if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { @@ -2096,7 +2097,10 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString( "spec.nodeName", "spec.serviceAccountName", "status.hostIP", - "status.podIP") + "status.podIP", + // status.podIPs is populated even if IPv6DualStack feature gate + // is not enabled. This will work for single stack and dual stack. 
+ "status.podIPs") var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList { @@ -4052,6 +4056,35 @@ func ValidateService(service *core.Service) field.ErrorList { ports[key] = true } + // Validate TopologyKeys + if len(service.Spec.TopologyKeys) > 0 { + topoPath := specPath.Child("topologyKeys") + // topologyKeys is mutually exclusive with 'externalTrafficPolicy=Local' + if service.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal { + allErrs = append(allErrs, field.Forbidden(topoPath, "may not be specified when `externalTrafficPolicy=Local`")) + } + if len(service.Spec.TopologyKeys) > core.MaxServiceTopologyKeys { + allErrs = append(allErrs, field.TooMany(topoPath, len(service.Spec.TopologyKeys), core.MaxServiceTopologyKeys)) + } + topoKeys := sets.NewString() + for i, key := range service.Spec.TopologyKeys { + keyPath := topoPath.Index(i) + if topoKeys.Has(key) { + allErrs = append(allErrs, field.Duplicate(keyPath, key)) + } + topoKeys.Insert(key) + // "Any" must be the last value specified + if key == v1.TopologyKeyAny && i != len(service.Spec.TopologyKeys)-1 { + allErrs = append(allErrs, field.Invalid(keyPath, key, `"*" must be the last value specified`)) + } + if key != v1.TopologyKeyAny { + for _, msg := range validation.IsQualifiedName(key) { + allErrs = append(allErrs, field.Invalid(keyPath, service.Spec.TopologyKeys, msg)) + } + } + } + } + // Validate SourceRange field and annotation _, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { @@ -4142,6 +4175,7 @@ func validateServiceExternalTrafficFieldsValue(service *core.Service) field.Erro allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal))) } + if service.Spec.HealthCheckNodePort < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, "HealthCheckNodePort must be not less than 0")) @@ -4920,16 +4954,16 @@ func ValidateSecret(secret *core.Secret) field.ErrorList { if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "", err.Error())) } - case core.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[core.DockerConfigJsonKey] + case core.SecretTypeDockerConfigJSON: + dockerConfigJSONBytes, exists := secret.Data[core.DockerConfigJSONKey] if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJsonKey), "")) + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJSONKey), "")) break } // make sure that the content is well-formed json. 
- if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJsonKey), "", err.Error())) + if err := json.Unmarshal(dockerConfigJSONBytes, &map[string]interface{}{}); err != nil { + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJSONKey), "", err.Error())) } case core.SecretTypeBasicAuth: _, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey] diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index f5954a13a84..534b8dc06fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -5171,6 +5171,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(IPFamily) **out = **in } + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/BUILD index d1347afd1a5..239cdc13236 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/BUILD @@ -32,6 +32,7 @@ filegroup( "//pkg/apis/discovery/fuzzer:all-srcs", "//pkg/apis/discovery/install:all-srcs", "//pkg/apis/discovery/v1alpha1:all-srcs", + "//pkg/apis/discovery/v1beta1:all-srcs", "//pkg/apis/discovery/validation:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/BUILD index 4a43e9cbe88..825bd82ee85 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/BUILD @@ -9,6 +9,7 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/discovery:go_default_library", "//pkg/apis/discovery/v1alpha1:go_default_library", + "//pkg/apis/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/install.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/install.go index 0f2e9a650ef..828f13883d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/install.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/install/install.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/discovery" "k8s.io/kubernetes/pkg/apis/discovery/v1alpha1" + "k8s.io/kubernetes/pkg/apis/discovery/v1beta1" ) func init() { @@ -34,5 +35,6 @@ func init() { func Install(scheme *runtime.Scheme) { utilruntime.Must(discovery.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) } diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/types.go index 09af2ea7300..4d3005c11a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/types.go @@ -32,9 +32,13 @@ type EndpointSlice struct { // +optional metav1.ObjectMeta // addressType specifies the type of address carried by this EndpointSlice. - // All addresses in this slice must be the same type. - // +optional - AddressType *AddressType + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic @@ -54,17 +58,26 @@ type AddressType string const ( // AddressTypeIP represents an IP Address. + // This address type has been deprecated and has been replaced by the IPv4 + // and IPv6 adddress types. New resources with this address type will be + // considered invalid. This will be fully removed in 1.18. + // +deprecated AddressTypeIP = AddressType("IP") + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(api.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(api.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") ) // Endpoint represents a single logical "backend" implementing a service. type Endpoint struct { // addresses of this endpoint. The contents of this field are interpreted - // according to the corresponding EndpointSlice addressType field. This - // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers - // (e.g. kube-proxy) must handle different types of addresses in the context - // of their own capabilities. This must contain at least one address but no - // more than 100. + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. // +listType=set Addresses []string // conditions contains information about the current status of the endpoint. @@ -110,11 +123,10 @@ type EndpointPort struct { // The name of this port. All ports in an EndpointSlice must have a unique // name. If the EndpointSlice is dervied from a Kubernetes service, this // corresponds to the Service.ports[].name. - // Name must either be an empty string or pass IANA_SVC_NAME validation: - // * must be no more than 15 characters long - // * may contain only [-a-z0-9] - // * must contain at least one letter [a-z] - // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. Name *string // The IP protocol for this port. // Must be UDP, TCP, or SCTP. @@ -123,6 +135,14 @@ type EndpointPort struct { // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. 
Port *int32 + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + // +optional + AppProtocol *string } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/defaults.go index a03d63811e7..7efdad3ef44 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/defaults.go @@ -23,21 +23,14 @@ import ( ) var ( - defaultAddressType = discoveryv1alpha1.AddressTypeIP - defaultPortName = "" - defaultProtocol = v1.ProtocolTCP + defaultPortName = "" + defaultProtocol = v1.ProtocolTCP ) func addDefaultingFuncs(scheme *runtime.Scheme) error { return RegisterDefaults(scheme) } -func SetDefaults_EndpointSlice(obj *discoveryv1alpha1.EndpointSlice) { - if obj.AddressType == nil { - obj.AddressType = &defaultAddressType - } -} - func SetDefaults_EndpointPort(obj *discoveryv1alpha1.EndpointPort) { if obj.Name == nil { obj.Name = &defaultPortName diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.conversion.go index 97469829d6d..236ba256733 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.conversion.go @@ -147,6 +147,7 @@ func autoConvert_v1alpha1_EndpointPort_To_discovery_EndpointPort(in *v1alpha1.En out.Name = (*string)(unsafe.Pointer(in.Name)) out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*int32)(unsafe.Pointer(in.Port)) + out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) return nil } @@ -159,6 +160,7 @@ func autoConvert_discovery_EndpointPort_To_v1alpha1_EndpointPort(in *discovery.E out.Name = (*string)(unsafe.Pointer(in.Name)) out.Protocol = (*v1.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*int32)(unsafe.Pointer(in.Port)) + out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) return nil } @@ -169,7 +171,7 @@ func Convert_discovery_EndpointPort_To_v1alpha1_EndpointPort(in *discovery.Endpo func autoConvert_v1alpha1_EndpointSlice_To_discovery_EndpointSlice(in *v1alpha1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - out.AddressType = (*discovery.AddressType)(unsafe.Pointer(in.AddressType)) + out.AddressType = discovery.AddressType(in.AddressType) out.Endpoints = *(*[]discovery.Endpoint)(unsafe.Pointer(&in.Endpoints)) out.Ports = *(*[]discovery.EndpointPort)(unsafe.Pointer(&in.Ports)) return nil @@ -182,7 +184,7 @@ func Convert_v1alpha1_EndpointSlice_To_discovery_EndpointSlice(in *v1alpha1.Endp func autoConvert_discovery_EndpointSlice_To_v1alpha1_EndpointSlice(in *discovery.EndpointSlice, out *v1alpha1.EndpointSlice, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - out.AddressType = (*v1alpha1.AddressType)(unsafe.Pointer(in.AddressType)) + out.AddressType = v1alpha1.AddressType(in.AddressType) out.Endpoints = 
*(*[]v1alpha1.Endpoint)(unsafe.Pointer(&in.Endpoints)) out.Ports = *(*[]v1alpha1.EndpointPort)(unsafe.Pointer(&in.Ports)) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.defaults.go index 1302b11b440..67686962241 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1alpha1/zz_generated.defaults.go @@ -35,7 +35,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error { } func SetObjectDefaults_EndpointSlice(in *v1alpha1.EndpointSlice) { - SetDefaults_EndpointSlice(in) for i := range in.Ports { a := &in.Ports[i] SetDefaults_EndpointPort(a) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/BUILD new file mode 100644 index 00000000000..58cfb5af334 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/BUILD @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/discovery/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/discovery:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["defaults_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/discovery/install:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/defaults.go new file mode 100644 index 00000000000..47161f1f1c3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/defaults.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + defaultPortName = "" + defaultProtocol = v1.ProtocolTCP +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_EndpointPort(obj *discoveryv1beta1.EndpointPort) { + if obj.Name == nil { + obj.Name = &defaultPortName + } + + if obj.Protocol == nil { + obj.Protocol = &defaultProtocol + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/doc.go new file mode 100644 index 00000000000..495aeb822eb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/discovery +// +k8s:conversion-gen-external-types=k8s.io/api/discovery/v1beta1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/discovery/v1beta1 + +package v1beta1 // import "k8s.io/kubernetes/pkg/apis/discovery/v1beta1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/register.go new file mode 100644 index 00000000000..4f696ca1dd3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &discoveryv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.conversion.go new file mode 100644 index 00000000000..c7eb5cd62da --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.conversion.go @@ -0,0 +1,218 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/discovery/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + core "k8s.io/kubernetes/pkg/apis/core" + discovery "k8s.io/kubernetes/pkg/apis/discovery" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.Endpoint)(nil), (*discovery.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Endpoint_To_discovery_Endpoint(a.(*v1beta1.Endpoint), b.(*discovery.Endpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*discovery.Endpoint)(nil), (*v1beta1.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_discovery_Endpoint_To_v1beta1_Endpoint(a.(*discovery.Endpoint), b.(*v1beta1.Endpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointConditions)(nil), (*discovery.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(a.(*v1beta1.EndpointConditions), b.(*discovery.EndpointConditions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*discovery.EndpointConditions)(nil), (*v1beta1.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(a.(*discovery.EndpointConditions), b.(*v1beta1.EndpointConditions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointPort)(nil), (*discovery.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EndpointPort_To_discovery_EndpointPort(a.(*v1beta1.EndpointPort), b.(*discovery.EndpointPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*discovery.EndpointPort)(nil), (*v1beta1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_discovery_EndpointPort_To_v1beta1_EndpointPort(a.(*discovery.EndpointPort), b.(*v1beta1.EndpointPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointSlice)(nil), (*discovery.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(a.(*v1beta1.EndpointSlice), b.(*discovery.EndpointSlice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*discovery.EndpointSlice)(nil), (*v1beta1.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(a.(*discovery.EndpointSlice), b.(*v1beta1.EndpointSlice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointSliceList)(nil), (*discovery.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(a.(*v1beta1.EndpointSliceList), b.(*discovery.EndpointSliceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*discovery.EndpointSliceList)(nil), (*v1beta1.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(a.(*discovery.EndpointSliceList), b.(*v1beta1.EndpointSliceList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Endpoint_To_discovery_Endpoint(in *v1beta1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error { + out.Addresses = 
*(*[]string)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil { + return err + } + out.Hostname = (*string)(unsafe.Pointer(in.Hostname)) + out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef)) + out.Topology = *(*map[string]string)(unsafe.Pointer(&in.Topology)) + return nil +} + +// Convert_v1beta1_Endpoint_To_discovery_Endpoint is an autogenerated conversion function. +func Convert_v1beta1_Endpoint_To_discovery_Endpoint(in *v1beta1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error { + return autoConvert_v1beta1_Endpoint_To_discovery_Endpoint(in, out, s) +} + +func autoConvert_discovery_Endpoint_To_v1beta1_Endpoint(in *discovery.Endpoint, out *v1beta1.Endpoint, s conversion.Scope) error { + out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) + if err := Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil { + return err + } + out.Hostname = (*string)(unsafe.Pointer(in.Hostname)) + out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + out.Topology = *(*map[string]string)(unsafe.Pointer(&in.Topology)) + return nil +} + +// Convert_discovery_Endpoint_To_v1beta1_Endpoint is an autogenerated conversion function. +func Convert_discovery_Endpoint_To_v1beta1_Endpoint(in *discovery.Endpoint, out *v1beta1.Endpoint, s conversion.Scope) error { + return autoConvert_discovery_Endpoint_To_v1beta1_Endpoint(in, out, s) +} + +func autoConvert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in *v1beta1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error { + out.Ready = (*bool)(unsafe.Pointer(in.Ready)) + return nil +} + +// Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions is an autogenerated conversion function. +func Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in *v1beta1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error { + return autoConvert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in, out, s) +} + +func autoConvert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in *discovery.EndpointConditions, out *v1beta1.EndpointConditions, s conversion.Scope) error { + out.Ready = (*bool)(unsafe.Pointer(in.Ready)) + return nil +} + +// Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions is an autogenerated conversion function. +func Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in *discovery.EndpointConditions, out *v1beta1.EndpointConditions, s conversion.Scope) error { + return autoConvert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in, out, s) +} + +func autoConvert_v1beta1_EndpointPort_To_discovery_EndpointPort(in *v1beta1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*int32)(unsafe.Pointer(in.Port)) + out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) + return nil +} + +// Convert_v1beta1_EndpointPort_To_discovery_EndpointPort is an autogenerated conversion function. 
+func Convert_v1beta1_EndpointPort_To_discovery_EndpointPort(in *v1beta1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error { + return autoConvert_v1beta1_EndpointPort_To_discovery_EndpointPort(in, out, s) +} + +func autoConvert_discovery_EndpointPort_To_v1beta1_EndpointPort(in *discovery.EndpointPort, out *v1beta1.EndpointPort, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Protocol = (*v1.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*int32)(unsafe.Pointer(in.Port)) + out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) + return nil +} + +// Convert_discovery_EndpointPort_To_v1beta1_EndpointPort is an autogenerated conversion function. +func Convert_discovery_EndpointPort_To_v1beta1_EndpointPort(in *discovery.EndpointPort, out *v1beta1.EndpointPort, s conversion.Scope) error { + return autoConvert_discovery_EndpointPort_To_v1beta1_EndpointPort(in, out, s) +} + +func autoConvert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in *v1beta1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.AddressType = discovery.AddressType(in.AddressType) + out.Endpoints = *(*[]discovery.Endpoint)(unsafe.Pointer(&in.Endpoints)) + out.Ports = *(*[]discovery.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice is an autogenerated conversion function. +func Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in *v1beta1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error { + return autoConvert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in, out, s) +} + +func autoConvert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in *discovery.EndpointSlice, out *v1beta1.EndpointSlice, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.AddressType = v1beta1.AddressType(in.AddressType) + out.Endpoints = *(*[]v1beta1.Endpoint)(unsafe.Pointer(&in.Endpoints)) + out.Ports = *(*[]v1beta1.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice is an autogenerated conversion function. +func Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in *discovery.EndpointSlice, out *v1beta1.EndpointSlice, s conversion.Scope) error { + return autoConvert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in, out, s) +} + +func autoConvert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in *v1beta1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]discovery.EndpointSlice)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList is an autogenerated conversion function. +func Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in *v1beta1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error { + return autoConvert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in, out, s) +} + +func autoConvert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in *discovery.EndpointSliceList, out *v1beta1.EndpointSliceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1beta1.EndpointSlice)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList is an autogenerated conversion function. 
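// [Editor's note: illustrative sketch, not part of the generated file or of
// this patch.] Because EndpointSlice.AddressType is now a plain value rather
// than a *AddressType pointer (see the discovery types.go and deepcopy changes
// earlier in this patch), these converters cast it directly with no nil
// handling. Using only identifiers from this file's imports, a round trip
// looks roughly like this; the function and variable names are made up for
// illustration.
func exampleEndpointSliceConversion() (*discovery.EndpointSlice, error) {
	in := v1beta1.EndpointSlice{AddressType: v1beta1.AddressTypeIPv4}
	out := &discovery.EndpointSlice{}
	// The generated EndpointSlice conversion does not use the scope, so nil
	// is sufficient for this sketch.
	if err := Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(&in, out, nil); err != nil {
		return nil, err
	}
	// out.AddressType == discovery.AddressTypeIPv4, i.e. AddressType("IPv4")
	return out, nil
}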
+func Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in *discovery.EndpointSliceList, out *v1beta1.EndpointSliceList, s conversion.Scope) error { + return autoConvert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.defaults.go new file mode 100644 index 00000000000..bfcd0afbf29 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/v1beta1/zz_generated.defaults.go @@ -0,0 +1,49 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1beta1.EndpointSlice{}, func(obj interface{}) { SetObjectDefaults_EndpointSlice(obj.(*v1beta1.EndpointSlice)) }) + scheme.AddTypeDefaultingFunc(&v1beta1.EndpointSliceList{}, func(obj interface{}) { SetObjectDefaults_EndpointSliceList(obj.(*v1beta1.EndpointSliceList)) }) + return nil +} + +func SetObjectDefaults_EndpointSlice(in *v1beta1.EndpointSlice) { + for i := range in.Ports { + a := &in.Ports[i] + SetDefaults_EndpointPort(a) + } +} + +func SetObjectDefaults_EndpointSliceList(in *v1beta1.EndpointSliceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_EndpointSlice(a) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/zz_generated.deepcopy.go index d4c33c5482b..483988691a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/discovery/zz_generated.deepcopy.go @@ -103,6 +103,11 @@ func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { *out = new(int32) **out = **in } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } return } @@ -121,11 +126,6 @@ func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.AddressType != nil { - in, out := &in.AddressType, &out.AddressType - *out = new(AddressType) - **out = **in - } if in.Endpoints != nil { in, out := &in.Endpoints, &out.Endpoints *out = make([]Endpoint, len(*in)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go index ddef6a3b43a..5be725bf02c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go @@ -37,16 +37,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Event_To_core_Event(a.(*v1beta1.Event), b.(*core.Event), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.Event)(nil), (*v1beta1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Event_To_v1beta1_Event(a.(*core.Event), b.(*v1beta1.Event), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EventList_To_core_EventList(a.(*v1beta1.EventList), b.(*core.EventList), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS index 165db06c75a..f2ab71a0187 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS @@ -27,14 +27,12 @@ reviewers: - errordeveloper - madhusudancs - rootfs -- jszczepkowski - mml - resouer - mbohlool - david-mcmahon - therc - pweil- -- tmrts - mqliang - lukaszo - jianhuiz diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 14f59753071..28643aeb371 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -41,6 +41,7 @@ func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } +// Builds new Scheme of known types var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index 472ed99dabc..a2edbb5b09e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -34,7 +34,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Dummy definition +// ReplicationControllerDummy : Dummy definition type ReplicationControllerDummy struct { metav1.TypeMeta } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index 80a83b9b289..5812241ea75 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -25,7 +25,6 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" @@ -34,45 +33,6 @@ import ( "k8s.io/kubernetes/pkg/apis/networking" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, - Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, - Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, - Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, - Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, - Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec, - Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy, - Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy, - Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule, - Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, - Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList, - Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList, - Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer, - Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, - Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort, - Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, - Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec, - Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, - Convert_v1beta1_IPBlock_To_networking_IPBlock, - Convert_networking_IPBlock_To_v1beta1_IPBlock, - Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule, - Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule, - ) - if err != nil { - return err - } - - return nil -} - func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = int32(in.Replicas) out.TargetSelector = in.Selector diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go index bd6ce8d9f2f..2b5773c158e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go @@ -41,5 +41,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index b1a145dc448..c7d30005bdf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -175,16 +175,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { @@ -195,16 +185,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.FSGroupStrategyOptions)(nil), (*policy.FSGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FSGroupStrategyOptions_To_policy_FSGroupStrategyOptions(a.(*v1beta1.FSGroupStrategyOptions), b.(*policy.FSGroupStrategyOptions), scope) }); err != nil { @@ -255,16 +235,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.IPBlock)(nil), (*networking.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_IPBlock_To_networking_IPBlock(a.(*v1beta1.IPBlock), b.(*networking.IPBlock), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.IPBlock)(nil), (*v1beta1.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_IPBlock_To_v1beta1_IPBlock(a.(*networking.IPBlock), b.(*v1beta1.IPBlock), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.Ingress)(nil), (*networking.Ingress)(nil), 
func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Ingress_To_networking_Ingress(a.(*v1beta1.Ingress), b.(*networking.Ingress), scope) }); err != nil { @@ -345,76 +315,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicy)(nil), (*networking.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(a.(*v1beta1.NetworkPolicy), b.(*networking.NetworkPolicy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicy)(nil), (*v1beta1.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(a.(*networking.NetworkPolicy), b.(*v1beta1.NetworkPolicy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicyEgressRule)(nil), (*networking.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(a.(*v1beta1.NetworkPolicyEgressRule), b.(*networking.NetworkPolicyEgressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyEgressRule)(nil), (*v1beta1.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(a.(*networking.NetworkPolicyEgressRule), b.(*v1beta1.NetworkPolicyEgressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicyIngressRule)(nil), (*networking.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(a.(*v1beta1.NetworkPolicyIngressRule), b.(*networking.NetworkPolicyIngressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyIngressRule)(nil), (*v1beta1.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(a.(*networking.NetworkPolicyIngressRule), b.(*v1beta1.NetworkPolicyIngressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicyList)(nil), (*networking.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(a.(*v1beta1.NetworkPolicyList), b.(*networking.NetworkPolicyList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyList)(nil), (*v1beta1.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(a.(*networking.NetworkPolicyList), b.(*v1beta1.NetworkPolicyList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicyPeer)(nil), (*networking.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(a.(*v1beta1.NetworkPolicyPeer), b.(*networking.NetworkPolicyPeer), scope) - }); err != 
nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPeer)(nil), (*v1beta1.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(a.(*networking.NetworkPolicyPeer), b.(*v1beta1.NetworkPolicyPeer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicyPort)(nil), (*networking.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(a.(*v1beta1.NetworkPolicyPort), b.(*networking.NetworkPolicyPort), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPort)(nil), (*v1beta1.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(a.(*networking.NetworkPolicyPort), b.(*v1beta1.NetworkPolicyPort), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkPolicySpec)(nil), (*networking.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(a.(*v1beta1.NetworkPolicySpec), b.(*networking.NetworkPolicySpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicySpec)(nil), (*v1beta1.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(a.(*networking.NetworkPolicySpec), b.(*v1beta1.NetworkPolicySpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.PodSecurityPolicy)(nil), (*policy.PodSecurityPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_PodSecurityPolicy_To_policy_PodSecurityPolicy(a.(*v1beta1.PodSecurityPolicy), b.(*policy.PodSecurityPolicy), scope) }); err != nil { @@ -475,16 +375,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1beta1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { @@ -515,26 +405,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), 
b.(*apps.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsGroupStrategyOptions)(nil), (*policy.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(a.(*v1beta1.RunAsGroupStrategyOptions), b.(*policy.RunAsGroupStrategyOptions), scope) }); err != nil { @@ -595,16 +465,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta1.SupplementalGroupsStrategyOptions)(nil), (*policy.SupplementalGroupsStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SupplementalGroupsStrategyOptions_To_policy_SupplementalGroupsStrategyOptions(a.(*v1beta1.SupplementalGroupsStrategyOptions), b.(*policy.SupplementalGroupsStrategyOptions), scope) }); err != nil { @@ -2185,24 +2045,36 @@ func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConf } func autoConvert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable 
requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } return nil } func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil { + return err + } + if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil { + return err + } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/conversion.go index 013d2a837bd..5036b62b1f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/conversion.go @@ -19,17 +19,9 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/node/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" node "k8s.io/kubernetes/pkg/apis/node" ) -func addConversionFuncs(s *runtime.Scheme) error { - return s.AddConversionFuncs( - Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass, - Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass, - ) -} - // Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass must override the automatic // conversion since we unnested the spec struct after v1alpha1 func Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(in *v1alpha1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/register.go index b48efda70ac..ae147feb41c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/register.go @@ -42,5 +42,5 @@ func init() { // We only register manually written functions here. 
The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) + localSchemeBuilder.Register() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/zz_generated.conversion.go index d268b813dad..ee08773cf74 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/node/v1alpha1/zz_generated.conversion.go @@ -48,16 +48,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha1.RuntimeClass)(nil), (*node.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(a.(*v1alpha1.RuntimeClass), b.(*node.RuntimeClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*node.RuntimeClass)(nil), (*v1alpha1.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass(a.(*node.RuntimeClass), b.(*v1alpha1.RuntimeClass), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1alpha1.RuntimeClassList)(nil), (*node.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList(a.(*v1alpha1.RuntimeClassList), b.(*node.RuntimeClassList), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go index c82e200a1da..1de340f662e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go @@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to the given scheme. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go index 1691d3a0c35..200f62552c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -48,7 +48,7 @@ type PodDisruptionBudgetSpec struct { // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. + // status information is valid only if observedGeneration equals to PDB's object generation. // +optional ObservedGeneration int64 @@ -275,7 +275,8 @@ var AllowAllCapabilities api.Capability = "*" // FSType gives strong typing to different file systems that are used by volumes. 
type FSType string -var ( +// Exported FSTypes. +const ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go index 2d880a20968..366b29e4dd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go @@ -35,12 +35,16 @@ import ( psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" ) +// ValidatePodDisruptionBudget validates a PodDisruptionBudget and returns an ErrorList +// with any errors. func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorList { allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec")) allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...) return allErrs } +// ValidatePodDisruptionBudgetSpec validates a PodDisruptionBudgetSpec and returns an ErrorList +// with any errors. func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -63,6 +67,8 @@ func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPat return allErrs } +// ValidatePodDisruptionBudgetStatus validates a PodDisruptionBudgetStatus and returns an ErrorList +// with any errors. func ValidatePodDisruptionBudgetStatus(status policy.PodDisruptionBudgetStatus, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.PodDisruptionsAllowed), fldPath.Child("podDisruptionsAllowed"))...) @@ -78,6 +84,8 @@ func ValidatePodDisruptionBudgetStatus(status policy.PodDisruptionBudgetStatus, // trailing dashes are allowed. var ValidatePodSecurityPolicyName = apimachineryvalidation.NameIsDNSSubdomain +// ValidatePodSecurityPolicy validates a PodSecurityPolicy and returns an ErrorList +// with any errors. func ValidatePodSecurityPolicy(psp *policy.PodSecurityPolicy) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...) @@ -86,6 +94,8 @@ func ValidatePodSecurityPolicy(psp *policy.PodSecurityPolicy) field.ErrorList { return allErrs } +// ValidatePodSecurityPolicySpec validates a PodSecurityPolicySpec and returns an ErrorList +// with any errors. func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -114,6 +124,8 @@ func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, fldPath * return allErrs } +// ValidatePodSecurityPolicySpecificAnnotations validates annotations and returns an ErrorList +// with any errors. func ValidatePodSecurityPolicySpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -335,10 +347,13 @@ func validatePSPAllowedProcMountTypes(fldPath *field.Path, allowedProcMountTypes } const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]" + +// SysctlPatternFmt is a regex used for matching valid sysctl patterns. 
const SysctlPatternFmt string = "(" + apivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt var sysctlPatternRegexp = regexp.MustCompile("^" + SysctlPatternFmt + "$") +// IsValidSysctlPattern checks if name is a valid sysctl pattern. func IsValidSysctlPattern(name string) bool { if len(name) > apivalidation.SysctlMaxLength { return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go index 63da516b0b9..c2544881cc4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -147,16 +147,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha1.Subject)(nil), (*rbac.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Subject_To_rbac_Subject(a.(*v1alpha1.Subject), b.(*rbac.Subject), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*rbac.Subject)(nil), (*v1alpha1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_rbac_Subject_To_v1alpha1_Subject(a.(*rbac.Subject), b.(*v1alpha1.Subject), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*rbac.Subject)(nil), (*v1alpha1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_rbac_Subject_To_v1alpha1_Subject(a.(*rbac.Subject), b.(*v1alpha1.Subject), scope) }); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD index f32c1c166ea..8b510f5cf52 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD @@ -1,16 +1,11 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ "doc.go", - "helpers.go", "register.go", "types.go", "zz_generated.deepcopy.go", @@ -45,10 +40,3 @@ filegroup( ], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/BUILD index 9a88744d07c..1624bb9802c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "defaults.go", "doc.go", + "helpers.go", "register.go", "zz_generated.conversion.go", "zz_generated.defaults.go", @@ -17,6 +18,7 @@ go_library( "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/scheduling/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -40,14 +42,19 @@ filegroup( go_test( name = "go_default_test", - srcs = ["defaults_test.go"], + srcs = [ + "defaults_test.go", + "helpers_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", + "//pkg/apis/scheduling:go_default_library", "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/scheduling/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/helpers.go similarity index 67% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/helpers.go index 58b3799751c..d4af4933cfa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1/helpers.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,52 +14,54 @@ See the License for the specific language governing permissions and limitations under the License. */ -package scheduling +package v1 import ( "fmt" + "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/apis/scheduling" ) // SystemPriorityClasses define system priority classes that are auto-created at cluster bootstrapping. // Our API validation logic ensures that any priority class that has a system prefix or its value // is higher than HighestUserDefinablePriority is equal to one of these SystemPriorityClasses. -var systemPriorityClasses = []*PriorityClass{ +var systemPriorityClasses = []*v1.PriorityClass{ { ObjectMeta: metav1.ObjectMeta{ - Name: SystemNodeCritical, + Name: scheduling.SystemNodeCritical, }, - Value: SystemCriticalPriority + 1000, + Value: scheduling.SystemCriticalPriority + 1000, Description: "Used for system critical pods that must not be moved from their current node.", }, { ObjectMeta: metav1.ObjectMeta{ - Name: SystemClusterCritical, + Name: scheduling.SystemClusterCritical, }, - Value: SystemCriticalPriority, + Value: scheduling.SystemCriticalPriority, Description: "Used for system critical pods that must run in the cluster, but can be moved to another node if necessary.", }, } // SystemPriorityClasses returns the list of system priority classes. // NOTE: be careful not to modify any of elements of the returned array directly. -func SystemPriorityClasses() []*PriorityClass { +func SystemPriorityClasses() []*v1.PriorityClass { return systemPriorityClasses } -// IsKnownSystemPriorityClass checks that "pc" is equal to one of the system PriorityClasses. -// It ignores "description", labels, annotations, etc. of the PriorityClass. 
-func IsKnownSystemPriorityClass(pc *PriorityClass) (bool, error) { - for _, spc := range systemPriorityClasses { - if spc.Name == pc.Name { - if spc.Value != pc.Value { +// IsKnownSystemPriorityClass returns true if there's any of the system priority classes exactly +// matches "name", "value", "globalDefault". otherwise it will return an error. +func IsKnownSystemPriorityClass(name string, value int32, globalDefault bool) (bool, error) { + for _, spc := range SystemPriorityClasses() { + if spc.Name == name { + if spc.Value != value { return false, fmt.Errorf("value of %v PriorityClass must be %v", spc.Name, spc.Value) } - if spc.GlobalDefault != pc.GlobalDefault { + if spc.GlobalDefault != globalDefault { return false, fmt.Errorf("globalDefault of %v PriorityClass must be %v", spc.Name, spc.GlobalDefault) } return true, nil } } - return false, fmt.Errorf("%v is not a known system priority class", pc.Name) + return false, fmt.Errorf("%v is not a known system priority class", name) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/defaults.go index 41327f5fdcf..016407c532d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/defaults.go @@ -19,7 +19,7 @@ package v1alpha1 import ( apiv1 "k8s.io/api/core/v1" "k8s.io/api/scheduling/v1alpha1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/register.go index 1cd870af176..ed40537986d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/register.go @@ -42,5 +42,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(RegisterDefaults) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/defaults.go index c35594e4c11..6ffb8a669ec 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/defaults.go @@ -19,7 +19,7 @@ package v1beta1 import ( apiv1 "k8s.io/api/core/v1" "k8s.io/api/scheduling/v1beta1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/register.go index 114e084cbe6..a786bd47452 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/register.go @@ -42,5 +42,5 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(RegisterDefaults) + localSchemeBuilder.Register(addDefaultingFuncs) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go index 642759606b6..78185dfdbdb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go @@ -405,7 +405,7 @@ type VolumeNodeResources struct { // Maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. - // If this field is nil, then the supported number of volumes on this node is unbounded. + // If this field is not specified, then the supported number of volumes on this node is unbounded. // +optional Count *int32 } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go index 377afdd81fd..c6eba732a67 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go @@ -39,6 +39,46 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1.CSINode)(nil), (*storage.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSINode_To_storage_CSINode(a.(*v1.CSINode), b.(*storage.CSINode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.CSINode)(nil), (*v1.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_CSINode_To_v1_CSINode(a.(*storage.CSINode), b.(*v1.CSINode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CSINodeDriver)(nil), (*storage.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSINodeDriver_To_storage_CSINodeDriver(a.(*v1.CSINodeDriver), b.(*storage.CSINodeDriver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.CSINodeDriver)(nil), (*v1.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_CSINodeDriver_To_v1_CSINodeDriver(a.(*storage.CSINodeDriver), b.(*v1.CSINodeDriver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CSINodeList)(nil), (*storage.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSINodeList_To_storage_CSINodeList(a.(*v1.CSINodeList), b.(*storage.CSINodeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.CSINodeList)(nil), (*v1.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_CSINodeList_To_v1_CSINodeList(a.(*storage.CSINodeList), b.(*v1.CSINodeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CSINodeSpec)(nil), (*storage.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(a.(*v1.CSINodeSpec), b.(*storage.CSINodeSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.CSINodeSpec)(nil), (*v1.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(a.(*storage.CSINodeSpec), b.(*v1.CSINodeSpec), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.StorageClass)(nil), (*storage.StorageClass)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_StorageClass_To_storage_StorageClass(a.(*v1.StorageClass), b.(*storage.StorageClass), scope) }); err != nil { @@ -119,9 +159,113 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.VolumeNodeResources)(nil), (*storage.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(a.(*v1.VolumeNodeResources), b.(*storage.VolumeNodeResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeNodeResources)(nil), (*v1.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(a.(*storage.VolumeNodeResources), b.(*v1.VolumeNodeResources), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CSINode_To_storage_CSINode(in *v1.CSINode, out *storage.CSINode, s 
conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_CSINode_To_storage_CSINode is an autogenerated conversion function. +func Convert_v1_CSINode_To_storage_CSINode(in *v1.CSINode, out *storage.CSINode, s conversion.Scope) error { + return autoConvert_v1_CSINode_To_storage_CSINode(in, out, s) +} + +func autoConvert_storage_CSINode_To_v1_CSINode(in *storage.CSINode, out *v1.CSINode, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } return nil } +// Convert_storage_CSINode_To_v1_CSINode is an autogenerated conversion function. +func Convert_storage_CSINode_To_v1_CSINode(in *storage.CSINode, out *v1.CSINode, s conversion.Scope) error { + return autoConvert_storage_CSINode_To_v1_CSINode(in, out, s) +} + +func autoConvert_v1_CSINodeDriver_To_storage_CSINodeDriver(in *v1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error { + out.Name = in.Name + out.NodeID = in.NodeID + out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys)) + out.Allocatable = (*storage.VolumeNodeResources)(unsafe.Pointer(in.Allocatable)) + return nil +} + +// Convert_v1_CSINodeDriver_To_storage_CSINodeDriver is an autogenerated conversion function. +func Convert_v1_CSINodeDriver_To_storage_CSINodeDriver(in *v1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error { + return autoConvert_v1_CSINodeDriver_To_storage_CSINodeDriver(in, out, s) +} + +func autoConvert_storage_CSINodeDriver_To_v1_CSINodeDriver(in *storage.CSINodeDriver, out *v1.CSINodeDriver, s conversion.Scope) error { + out.Name = in.Name + out.NodeID = in.NodeID + out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys)) + out.Allocatable = (*v1.VolumeNodeResources)(unsafe.Pointer(in.Allocatable)) + return nil +} + +// Convert_storage_CSINodeDriver_To_v1_CSINodeDriver is an autogenerated conversion function. +func Convert_storage_CSINodeDriver_To_v1_CSINodeDriver(in *storage.CSINodeDriver, out *v1.CSINodeDriver, s conversion.Scope) error { + return autoConvert_storage_CSINodeDriver_To_v1_CSINodeDriver(in, out, s) +} + +func autoConvert_v1_CSINodeList_To_storage_CSINodeList(in *v1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.CSINode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_CSINodeList_To_storage_CSINodeList is an autogenerated conversion function. +func Convert_v1_CSINodeList_To_storage_CSINodeList(in *v1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error { + return autoConvert_v1_CSINodeList_To_storage_CSINodeList(in, out, s) +} + +func autoConvert_storage_CSINodeList_To_v1_CSINodeList(in *storage.CSINodeList, out *v1.CSINodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.CSINode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_storage_CSINodeList_To_v1_CSINodeList is an autogenerated conversion function. 
+func Convert_storage_CSINodeList_To_v1_CSINodeList(in *storage.CSINodeList, out *v1.CSINodeList, s conversion.Scope) error { + return autoConvert_storage_CSINodeList_To_v1_CSINodeList(in, out, s) +} + +func autoConvert_v1_CSINodeSpec_To_storage_CSINodeSpec(in *v1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error { + out.Drivers = *(*[]storage.CSINodeDriver)(unsafe.Pointer(&in.Drivers)) + return nil +} + +// Convert_v1_CSINodeSpec_To_storage_CSINodeSpec is an autogenerated conversion function. +func Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(in *v1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error { + return autoConvert_v1_CSINodeSpec_To_storage_CSINodeSpec(in, out, s) +} + +func autoConvert_storage_CSINodeSpec_To_v1_CSINodeSpec(in *storage.CSINodeSpec, out *v1.CSINodeSpec, s conversion.Scope) error { + out.Drivers = *(*[]v1.CSINodeDriver)(unsafe.Pointer(&in.Drivers)) + return nil +} + +// Convert_storage_CSINodeSpec_To_v1_CSINodeSpec is an autogenerated conversion function. +func Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(in *storage.CSINodeSpec, out *v1.CSINodeSpec, s conversion.Scope) error { + return autoConvert_storage_CSINodeSpec_To_v1_CSINodeSpec(in, out, s) +} + func autoConvert_v1_StorageClass_To_storage_StorageClass(in *v1.StorageClass, out *storage.StorageClass, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Provisioner = in.Provisioner @@ -365,3 +509,23 @@ func autoConvert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, func Convert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *v1.VolumeError, s conversion.Scope) error { return autoConvert_storage_VolumeError_To_v1_VolumeError(in, out, s) } + +func autoConvert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in *v1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error { + out.Count = (*int32)(unsafe.Pointer(in.Count)) + return nil +} + +// Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources is an autogenerated conversion function. +func Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in *v1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error { + return autoConvert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in, out, s) +} + +func autoConvert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in *storage.VolumeNodeResources, out *v1.VolumeNodeResources, s conversion.Scope) error { + out.Count = (*int32)(unsafe.Pointer(in.Count)) + return nil +} + +// Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources is an autogenerated conversion function. 
+func Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in *storage.VolumeNodeResources, out *v1.VolumeNodeResources, s conversion.Scope) error { + return autoConvert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions index 8371496c69d..1f266d036f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions @@ -3,20 +3,20 @@ { "SelectorRegexp": "k8s[.]io/utils", "AllowedPrefixes": [ + "k8s.io/utils/exec", + "k8s.io/utils/io", + "k8s.io/utils/keymutex", + "k8s.io/utils/mount", "k8s.io/utils/net", "k8s.io/utils/nsenter", - "k8s.io/utils/io", - "k8s.io/utils/strings", - "k8s.io/utils/exec", "k8s.io/utils/path", - "k8s.io/utils/keymutex" + "k8s.io/utils/strings" ] }, { "SelectorRegexp": "k8s[.]io/kubernetes", "AllowedPrefixes": [ - "k8s.io/kubernetes/pkg/cloudprovider/providers", - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/cloudprovider/providers" ], "ForbiddenPrefixes": [] } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions index 9c1d574230b..3d07bfe4afc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions @@ -26,8 +26,8 @@ "k8s.io/api/batch/v1beta1", "k8s.io/api/certificates/v1beta1", "k8s.io/api/core/v1", - "k8s.io/api/coordination/v1beta1", - "k8s.io/api/discovery/v1alpha1", + "k8s.io/api/coordination/v1", + "k8s.io/api/discovery/v1beta1", "k8s.io/api/extensions/v1beta1", "k8s.io/api/policy/v1beta1", "k8s.io/api/rbac/v1", @@ -118,6 +118,7 @@ { "SelectorRegexp": "k8s[.]io/client-go/", "AllowedPrefixes": [ + "k8s.io/client-go/util/keyutil", "k8s.io/client-go/discovery", "k8s.io/client-go/dynamic", "k8s.io/client-go/informers", @@ -146,9 +147,11 @@ "k8s.io/client-go/listers/autoscaling/v1", "k8s.io/client-go/listers/batch/v1", "k8s.io/client-go/listers/certificates/v1beta1", + "k8s.io/client-go/listers/coordination/v1", "k8s.io/client-go/listers/core/v1", "k8s.io/client-go/listers/discovery/v1alpha1", - "k8s.io/client-go/listers/coordination/v1beta1", + "k8s.io/client-go/listers/discovery/v1beta1", + "k8s.io/client-go/listers/coordination/v1", "k8s.io/client-go/listers/extensions/v1beta1", "k8s.io/client-go/listers/policy/v1beta1", "k8s.io/client-go/listers/rbac/v1", @@ -164,12 +167,12 @@ "k8s.io/client-go/tools/record", "k8s.io/client-go/tools/reference", "k8s.io/client-go/tools/watch", + "k8s.io/client-go/transport", "k8s.io/client-go/util/cert", "k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/retry", - "k8s.io/client-go/util/workqueue", "k8s.io/client-go/util/testing", - "k8s.io/client-go/transport" + "k8s.io/client-go/util/workqueue" ] }, { @@ -188,6 +191,8 @@ "k8s.io/kubernetes/pkg/apis/core/v1", "k8s.io/kubernetes/pkg/apis/core/v1/helper", "k8s.io/kubernetes/pkg/apis/core/validation", + "k8s.io/kubernetes/pkg/apis/discovery", + "k8s.io/kubernetes/pkg/apis/discovery/validation", "k8s.io/kubernetes/pkg/cloudprovider", "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", "k8s.io/kubernetes/pkg/controller", @@ -242,8 +247,6 @@ 
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", "k8s.io/kubernetes/pkg/util/hash", "k8s.io/kubernetes/pkg/util/labels", - "k8s.io/kubernetes/pkg/util/metrics", - "k8s.io/kubernetes/pkg/util/mount", "k8s.io/kubernetes/pkg/util/node", "k8s.io/kubernetes/pkg/util/slice", "k8s.io/kubernetes/pkg/util/taints", @@ -260,6 +263,7 @@ "k8s.io/kubernetes/pkg/master/ports", "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/util", + "k8s.io/kubernetes/pkg/scheduler/listers", "k8s.io/kubernetes/pkg/security/apparmor", "k8s.io/kubernetes/pkg/util/parsers", "k8s.io/kubernetes/pkg/fieldpath", @@ -282,14 +286,15 @@ "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1", "k8s.io/metrics/pkg/client/custom_metrics", "k8s.io/metrics/pkg/client/external_metrics", - "k8s.io/utils/nsenter", + "k8s.io/utils/exec", "k8s.io/utils/integer", "k8s.io/utils/io", + "k8s.io/utils/mount", + "k8s.io/utils/net", + "k8s.io/utils/nsenter", "k8s.io/utils/path", "k8s.io/utils/pointer", - "k8s.io/utils/exec", - "k8s.io/utils/strings", - "k8s.io/utils/net" + "k8s.io/utils/strings" ] }, { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go index f63afaca6f3..1759e797baa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go @@ -17,15 +17,17 @@ limitations under the License. package controller import ( + "encoding/json" "fmt" "sync" apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog" ) @@ -64,7 +66,7 @@ func (m *BaseControllerRefManager) CanAdopt() error { // // No reconciliation will be attempted if the controller is being deleted. func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) { - controllerRef := metav1.GetControllerOf(obj) + controllerRef := metav1.GetControllerOfNoCopy(obj) if controllerRef != nil { if controllerRef.UID != m.Controller.GetUID() { // Owned by someone else. Ignore. @@ -213,11 +215,12 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { } // Note that ValidateOwnerReferences() will reject this patch if another // OwnerReference exists with controller=true. - addControllerPatch := fmt.Sprintf( - `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, - m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.Controller.GetName(), m.Controller.GetUID(), pod.UID) - return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch)) + + patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, pod.UID) + if err != nil { + return err + } + return m.podControl.PatchPod(pod.Namespace, pod.Name, patchBytes) } // ReleasePod sends a patch to free the pod from the control of the controller. 
@@ -225,8 +228,11 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) - err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) + patchBytes, err := deleteOwnerRefStrategicMergePatch(pod.UID, m.Controller.GetUID()) + if err != nil { + return err + } + err = m.podControl.PatchPod(pod.Namespace, pod.Name, patchBytes) if err != nil { if errors.IsNotFound(err) { // If the pod no longer exists, ignore it. @@ -335,11 +341,11 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er } // Note that ValidateOwnerReferences() will reject this patch if another // OwnerReference exists with controller=true. - addControllerPatch := fmt.Sprintf( - `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, - m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.Controller.GetName(), m.Controller.GetUID(), rs.UID) - return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch)) + patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, rs.UID) + if err != nil { + return err + } + return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, patchBytes) } // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. @@ -347,8 +353,11 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) - err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) + patchBytes, err := deleteOwnerRefStrategicMergePatch(replicaSet.UID, m.Controller.GetUID()) + if err != nil { + return err + } + err = m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, patchBytes) if err != nil { if errors.IsNotFound(err) { // If the ReplicaSet no longer exists, ignore it. @@ -470,11 +479,11 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history } // Note that ValidateOwnerReferences() will reject this patch if another // OwnerReference exists with controller=true. 
- addControllerPatch := fmt.Sprintf( - `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, - m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.Controller.GetName(), m.Controller.GetUID(), history.UID) - return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch)) + patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, history.UID) + if err != nil { + return err + } + return m.crControl.PatchControllerRevision(history.Namespace, history.Name, patchBytes) } // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. @@ -482,8 +491,12 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) - err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) + patchBytes, err := deleteOwnerRefStrategicMergePatch(history.UID, m.Controller.GetUID()) + if err != nil { + return err + } + + err = m.crControl.PatchControllerRevision(history.Namespace, history.Name, patchBytes) if err != nil { if errors.IsNotFound(err) { // If the ControllerRevision no longer exists, ignore it. @@ -499,3 +512,64 @@ func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(histo } return err } + +type objectForDeleteOwnerRefStrategicMergePatch struct { + Metadata objectMetaForMergePatch `json:"metadata"` +} + +type objectMetaForMergePatch struct { + UID types.UID `json:"uid"` + OwnerReferences []map[string]string `json:"ownerReferences"` +} + +func deleteOwnerRefStrategicMergePatch(dependentUID types.UID, ownerUIDs ...types.UID) ([]byte, error) { + var pieces []map[string]string + for _, ownerUID := range ownerUIDs { + pieces = append(pieces, map[string]string{"$patch": "delete", "uid": string(ownerUID)}) + } + patch := objectForDeleteOwnerRefStrategicMergePatch{ + Metadata: objectMetaForMergePatch{ + UID: dependentUID, + OwnerReferences: pieces, + }, + } + patchBytes, err := json.Marshal(&patch) + if err != nil { + return nil, err + } + return patchBytes, nil +} + +type objectForAddOwnerRefPatch struct { + Metadata objectMetaForPatch `json:"metadata"` +} + +type objectMetaForPatch struct { + OwnerReferences []metav1.OwnerReference `json:"ownerReferences"` + UID types.UID `json:"uid"` +} + +func ownerRefControllerPatch(controller metav1.Object, controllerKind schema.GroupVersionKind, uid types.UID) ([]byte, error) { + blockOwnerDeletion := true + isController := true + addControllerPatch := objectForAddOwnerRefPatch{ + Metadata: objectMetaForPatch{ + UID: uid, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: controllerKind.GroupVersion().String(), + Kind: controllerKind.Kind, + Name: controller.GetName(), + UID: controller.GetUID(), + Controller: &isController, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + }, + }, + } + patchBytes, err := json.Marshal(&addControllerPatch) + if err != nil { + return nil, err + } + return patchBytes, nil 
+} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 025bf9d5ace..ec876e1eb2b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -95,7 +95,8 @@ var UpdateLabelBackoff = wait.Backoff{ } var ( - KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc + KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc + podPhaseToOrdinal = map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2} ) type ResyncPeriodFunc func() time.Duration @@ -577,7 +578,10 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT } newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod) if err != nil { - r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) + // only send an event if the namespace isn't terminating + if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { + r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) + } return err } accessor, err := meta.Accessor(object) @@ -706,9 +710,8 @@ func (s ByLogging) Less(i, j int) bool { return len(s[i].Spec.NodeName) > 0 } // 2. PodRunning < PodUnknown < PodPending - m := map[v1.PodPhase]int{v1.PodRunning: 0, v1.PodUnknown: 1, v1.PodPending: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] + if s[i].Status.Phase != s[j].Status.Phase { + return podPhaseToOrdinal[s[i].Status.Phase] > podPhaseToOrdinal[s[j].Status.Phase] } // 3. ready < not ready if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) { @@ -717,8 +720,12 @@ func (s ByLogging) Less(i, j int) bool { // TODO: take availability into account when we push minReadySeconds information from deployment into pods, // see https://github.com/kubernetes/kubernetes/issues/22065 // 4. Been ready for more time < less time < empty time - if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) { + readyTime1 := podReadyTime(s[i]) + readyTime2 := podReadyTime(s[j]) + if !readyTime1.Equal(readyTime2) { + return afterOrZero(readyTime2, readyTime1) + } } // 5. Pods with containers with higher restart counts < lower restart counts if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { @@ -744,9 +751,8 @@ func (s ActivePods) Less(i, j int) bool { return len(s[i].Spec.NodeName) == 0 } // 2. PodPending < PodUnknown < PodRunning - m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] + if podPhaseToOrdinal[s[i].Status.Phase] != podPhaseToOrdinal[s[j].Status.Phase] { + return podPhaseToOrdinal[s[i].Status.Phase] < podPhaseToOrdinal[s[j].Status.Phase] } // 3. Not ready < ready // If only one of the pods is not ready, the not ready one is smaller @@ -757,8 +763,12 @@ func (s ActivePods) Less(i, j int) bool { // see https://github.com/kubernetes/kubernetes/issues/22065 // 4. 
Been ready for empty time < less time < more time // If both pods are ready, the latest ready one is smaller - if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) { + readyTime1 := podReadyTime(s[i]) + readyTime2 := podReadyTime(s[j]) + if !readyTime1.Equal(readyTime2) { + return afterOrZero(readyTime1, readyTime2) + } } // 5. Pods with containers with higher restart counts < lower restart counts if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { @@ -771,6 +781,97 @@ func (s ActivePods) Less(i, j int) bool { return false } +// ActivePodsWithRanks is a sortable list of pods and a list of corresponding +// ranks which will be considered during sorting. The two lists must have equal +// length. After sorting, the pods will be ordered as follows, applying each +// rule in turn until one matches: +// +// 1. If only one of the pods is assigned to a node, the pod that is not +// assigned comes before the pod that is. +// 2. If the pods' phases differ, a pending pod comes before a pod whose phase +// is unknown, and a pod whose phase is unknown comes before a running pod. +// 3. If exactly one of the pods is ready, the pod that is not ready comes +// before the ready pod. +// 4. If the pods' ranks differ, the pod with greater rank comes before the pod +// with lower rank. +// 5. If both pods are ready but have not been ready for the same amount of +// time, the pod that has been ready for a shorter amount of time comes +// before the pod that has been ready for longer. +// 6. If one pod has a container that has restarted more than any container in +// the other pod, the pod with the container with more restarts comes +// before the other pod. +// 7. If the pods' creation times differ, the pod that was created more recently +// comes before the older pod. +// +// If none of these rules matches, the second pod comes before the first pod. +// +// The intention of this ordering is to put pods that should be preferred for +// deletion first in the list. +type ActivePodsWithRanks struct { + // Pods is a list of pods. + Pods []*v1.Pod + + // Rank is a ranking of pods. This ranking is used during sorting when + // comparing two pods that are both scheduled, in the same phase, and + // having the same ready status. + Rank []int +} + +func (s ActivePodsWithRanks) Len() int { + return len(s.Pods) +} + +func (s ActivePodsWithRanks) Swap(i, j int) { + s.Pods[i], s.Pods[j] = s.Pods[j], s.Pods[i] + s.Rank[i], s.Rank[j] = s.Rank[j], s.Rank[i] +} + +// Less compares two pods with corresponding ranks and returns true if the first +// one should be preferred for deletion. +func (s ActivePodsWithRanks) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s.Pods[i].Spec.NodeName != s.Pods[j].Spec.NodeName && (len(s.Pods[i].Spec.NodeName) == 0 || len(s.Pods[j].Spec.NodeName) == 0) { + return len(s.Pods[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + if podPhaseToOrdinal[s.Pods[i].Status.Phase] != podPhaseToOrdinal[s.Pods[j].Status.Phase] { + return podPhaseToOrdinal[s.Pods[i].Status.Phase] < podPhaseToOrdinal[s.Pods[j].Status.Phase] + } + // 3. 
Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if podutil.IsPodReady(s.Pods[i]) != podutil.IsPodReady(s.Pods[j]) { + return !podutil.IsPodReady(s.Pods[i]) + } + // 4. Doubled up < not doubled up + // If one of the two pods is on the same node as one or more additional + // ready pods that belong to the same replicaset, whichever pod has more + // colocated ready pods is less + if s.Rank[i] != s.Rank[j] { + return s.Rank[i] > s.Rank[j] + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 5. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if podutil.IsPodReady(s.Pods[i]) && podutil.IsPodReady(s.Pods[j]) { + readyTime1 := podReadyTime(s.Pods[i]) + readyTime2 := podReadyTime(s.Pods[j]) + if !readyTime1.Equal(readyTime2) { + return afterOrZero(readyTime1, readyTime2) + } + } + // 6. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s.Pods[i]) != maxContainerRestarts(s.Pods[j]) { + return maxContainerRestarts(s.Pods[i]) > maxContainerRestarts(s.Pods[j]) + } + // 7. Empty creation time pods < newer pods < older pods + if !s.Pods[i].CreationTimestamp.Equal(&s.Pods[j].CreationTimestamp) { + return afterOrZero(&s.Pods[i].CreationTimestamp, &s.Pods[j].CreationTimestamp) + } + return false +} + // afterOrZero checks if time t1 is after time t2; if one of them // is zero, the zero time is seen as after non-zero time. func afterOrZero(t1, t2 *metav1.Time) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/BUILD index cc104a345ba..041d1003766 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/BUILD @@ -11,6 +11,7 @@ go_library( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go index 93e377b506e..c9227484414 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go @@ -19,8 +19,9 @@ package persistentvolume import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -88,7 +89,10 @@ func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelist class, err := classLister.Get(className) if err != nil { - return false, nil + if apierrors.IsNotFound(err) { + return false, nil + 
} + return false, err } if class.VolumeBindingMode == nil { @@ -203,13 +207,8 @@ func FindMatchingVolume( volumeQty := volume.Spec.Capacity[v1.ResourceStorage] - // check if volumeModes do not match (feature gate protected) - isMismatch, err := CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) - if err != nil { - return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err) - } // filter out mismatching volumeModes - if isMismatch { + if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) { continue } @@ -305,9 +304,22 @@ func FindMatchingVolume( // CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume // and PersistentVolumeClaims -func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) { +func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool { if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - return false, nil + if pvcSpec.VolumeMode != nil && *pvcSpec.VolumeMode == v1.PersistentVolumeBlock { + // Block PVC does not match anything when the feature is off. We explicitly want + // to prevent binding block PVC to filesystem PV. + // The PVC should be ignored by PV controller. + return true + } + if pvSpec.VolumeMode != nil && *pvSpec.VolumeMode == v1.PersistentVolumeBlock { + // Block PV does not match anything when the feature is off. We explicitly want + // to prevent binding block PV to filesystem PVC. + // The PV should be ignored by PV controller. + return true + } + // Both PV + PVC are not block. + return false } // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager. @@ -320,7 +332,7 @@ func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1 if pvSpec.VolumeMode != nil { pvVolumeMode = *pvSpec.VolumeMode } - return requestedVolumeMode != pvVolumeMode, nil + return requestedVolumeMode != pvVolumeMode } // CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/BUILD index 1e6af527c8b..32affc54ccc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "scheduler_assume_cache.go", - "scheduler_bind_cache_metrics.go", "scheduler_binder.go", "scheduler_binder_cache.go", "scheduler_binder_fake.go", @@ -14,21 +13,25 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/controller/volume/persistentvolume/util:go_default_library", + "//pkg/controller/volume/scheduling/metrics:go_default_library", + "//pkg/features:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd3:go_default_library", + 
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/component-base/metrics:go_default_library", - "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//staging/src/k8s.io/csi-translation-lib:go_default_library", + "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -46,6 +49,7 @@ go_test( "//pkg/controller:go_default_library", "//pkg/controller/volume/persistentvolume/testing:go_default_library", "//pkg/controller/volume/persistentvolume/util:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -54,11 +58,14 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -72,7 +79,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/controller/volume/scheduling/metrics:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/BUILD similarity index 54% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/BUILD index 6dcb1ab2291..745aedfbe2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/BUILD @@ -2,14 +2,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["csi.pb.go"], - importpath = "k8s.io/kubernetes/pkg/volume/csi/csiv0", + srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/ptypes/wrappers:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", + 
"//staging/src/k8s.io/component-base/metrics:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_bind_cache_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go similarity index 94% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_bind_cache_metrics.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go index fef24acb0bd..808fd6d5783 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_bind_cache_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go @@ -14,11 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package scheduling +package metrics import ( - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -43,7 +41,7 @@ var ( Subsystem: VolumeSchedulerSubsystem, Name: "scheduling_duration_seconds", Help: "Volume scheduling stage latency", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + Buckets: metrics.ExponentialBuckets(1000, 2, 15), StabilityLevel: metrics.ALPHA, }, []string{"operation"}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_assume_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_assume_cache.go index 53d3b9b0605..832bee91bfb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_assume_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_assume_cache.go @@ -185,8 +185,11 @@ func (c *assumeCache) add(obj interface{}) { } objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} - c.store.Update(objInfo) - klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) + if err = c.store.Update(objInfo); err != nil { + klog.Warningf("got error when updating stored object : %v", err) + } else { + klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) + } } func (c *assumeCache) update(oldObj interface{}, newObj interface{}) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go index fbb0b1240e7..21bd947e134 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go @@ -19,23 +19,39 @@ package scheduling import ( "fmt" "sort" + "strings" "time" v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/etcd3" + utilfeature "k8s.io/apiserver/pkg/util/feature" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" storagelisters "k8s.io/client-go/listers/storage/v1" + csitrans 
"k8s.io/csi-translation-lib" + csiplugins "k8s.io/csi-translation-lib/plugins" "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util" + "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics" + "k8s.io/kubernetes/pkg/features" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +// InTreeToCSITranslator contains methods required to check migratable status +// and perform translations from InTree PV's to CSI +type InTreeToCSITranslator interface { + IsPVMigratable(pv *v1.PersistentVolume) bool + GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) + TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) +} + // SchedulerVolumeBinder is used by the scheduler to handle PVC/PV binding // and dynamic provisioning. The binding decisions are integrated into the pod scheduling // workflow so that the PV NodeAffinity is also considered along with the pod's other @@ -102,9 +118,10 @@ type volumeBinder struct { kubeClient clientset.Interface classLister storagelisters.StorageClassLister - nodeInformer coreinformers.NodeInformer - pvcCache PVCAssumeCache - pvCache PVAssumeCache + nodeInformer coreinformers.NodeInformer + csiNodeInformer storageinformers.CSINodeInformer + pvcCache PVCAssumeCache + pvCache PVAssumeCache // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes. // AssumePodVolumes modifies the bindings again for use in BindPodVolumes. @@ -112,12 +129,15 @@ type volumeBinder struct { // Amount of time to wait for the bind operation to succeed bindTimeout time.Duration + + translator InTreeToCSITranslator } // NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions. 
func NewVolumeBinder( kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, + csiNodeInformer storageinformers.CSINodeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, pvInformer coreinformers.PersistentVolumeInformer, storageClassInformer storageinformers.StorageClassInformer, @@ -127,10 +147,12 @@ func NewVolumeBinder( kubeClient: kubeClient, classLister: storageClassInformer.Lister(), nodeInformer: nodeInformer, + csiNodeInformer: csiNodeInformer, pvcCache: NewPVCAssumeCache(pvcInformer.Informer()), pvCache: NewPVAssumeCache(pvInformer.Informer()), podBindingCache: NewPodBindingCache(), bindTimeout: bindTimeout, + translator: csitrans.New(), } return b @@ -154,9 +176,9 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume boundVolumesSatisfied = true start := time.Now() defer func() { - VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) + metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) if err != nil { - VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() + metrics.VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() } }() @@ -256,9 +278,9 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) start := time.Now() defer func() { - VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) + metrics.VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) if err != nil { - VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() + metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() } }() @@ -332,9 +354,9 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { start := time.Now() defer func() { - VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds()) + metrics.VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds()) if err != nil { - VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() + metrics.VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() } }() @@ -347,10 +369,14 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { return err } - return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) { + err = wait.Poll(time.Second, b.bindTimeout, func() (bool, error) { b, err := b.checkBindings(assumedPod, bindings, claimsToProvision) return b, err }) + if err != nil { + return fmt.Errorf("Failed to bind volumes: %v", err) + } + return nil } func getPodName(pod *v1.Pod) string { @@ -452,6 +478,12 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim return false, fmt.Errorf("failed to get node %q: %v", pod.Spec.NodeName, err) } + csiNode, err := b.csiNodeInformer.Lister().Get(node.Name) + if err != nil { + // TODO: return the error once CSINode is created by default + klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err) + } + // Check for any conditions that might require scheduling retry // When pod is removed from scheduling queue because of deletion or any @@ -480,6 +512,11 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim return false, nil } + pv, err = b.tryTranslatePVToCSI(pv, csiNode) + if err != nil { + return false, fmt.Errorf("failed to translate pv to csi: %v", err) + } + // Check PV's node 
affinity (the node might not have the proper label) if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil { return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err) @@ -514,7 +551,10 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim } selectedNode := pvc.Annotations[pvutil.AnnSelectedNode] if selectedNode != pod.Spec.NodeName { - return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName) + // If provisioner fails to provision a volume, selectedNode + // annotation will be removed to signal back to the scheduler to + // retry. + return false, fmt.Errorf("provisioning failed for PVC %q", pvc.Name) } // If the PVC is bound to a PV, check its node affinity @@ -530,6 +570,12 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim } return false, fmt.Errorf("failed to get pv %q from cache: %v", pvc.Spec.VolumeName, err) } + + pv, err = b.tryTranslatePVToCSI(pv, csiNode) + if err != nil { + return false, err + } + if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil { return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err) } @@ -633,6 +679,12 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV } func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) { + csiNode, err := b.csiNodeInformer.Lister().Get(node.Name) + if err != nil { + // TODO: return the error once CSINode is created by default + klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err) + } + for _, pvc := range claims { pvName := pvc.Spec.VolumeName pv, err := b.pvCache.GetPV(pvName) @@ -640,6 +692,11 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node return false, err } + pv, err = b.tryTranslatePVToCSI(pv, csiNode) + if err != nil { + return false, err + } + err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) @@ -775,3 +832,72 @@ func (a byPVCSize) Less(i, j int) bool { func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) } + +// isCSIMigrationOnForPlugin checks if CSI migrartion is enabled for a given plugin. +func isCSIMigrationOnForPlugin(pluginName string) bool { + switch pluginName { + case csiplugins.AWSEBSInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) + case csiplugins.GCEPDInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) + case csiplugins.AzureDiskInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) + case csiplugins.CinderInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) + } + return false +} + +// isPluginMigratedToCSIOnNode checks if an in-tree plugin has been migrated to a CSI driver on the node. 
+func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode) bool { + if csiNode == nil { + return false + } + + csiNodeAnn := csiNode.GetAnnotations() + if csiNodeAnn == nil { + return false + } + + var mpaSet sets.String + mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] + if len(mpa) == 0 { + mpaSet = sets.NewString() + } else { + tok := strings.Split(mpa, ",") + mpaSet = sets.NewString(tok...) + } + + return mpaSet.Has(pluginName) +} + +// tryTranslatePVToCSI will translate the in-tree PV to CSI if it meets the criteria. If not, it returns the unmodified in-tree PV. +func (b *volumeBinder) tryTranslatePVToCSI(pv *v1.PersistentVolume, csiNode *storagev1.CSINode) (*v1.PersistentVolume, error) { + if !b.translator.IsPVMigratable(pv) { + return pv, nil + } + + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { + return pv, nil + } + + pluginName, err := b.translator.GetInTreePluginNameFromSpec(pv, nil) + if err != nil { + return nil, fmt.Errorf("could not get plugin name from pv: %v", err) + } + + if !isCSIMigrationOnForPlugin(pluginName) { + return pv, nil + } + + if !isPluginMigratedToCSIOnNode(pluginName, csiNode) { + return pv, nil + } + + transPV, err := b.translator.TranslateInTreePVToCSI(pv) + if err != nil { + return nil, fmt.Errorf("could not translate pv: %v", err) + } + + return transPV, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder_cache.go index 5b02412239c..5452e1f349c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder_cache.go @@ -19,7 +19,8 @@ package scheduling import ( "sync" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics" ) // PodBindingCache stores PV binding decisions per pod per node. 
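isPluginMigratedToCSIOnNode above reads the CSINode's migrated-plugins annotation (v1.MigratedPluginsAnnotationKey) and checks set membership. A standard-library-only sketch of the same check, taking the raw annotation value instead of a *storagev1.CSINode and using a plain map in place of sets.String (plugin names shown are only examples):

package main

import (
	"fmt"
	"strings"
)

// pluginMigratedOnNode mirrors the shape of isPluginMigratedToCSIOnNode above,
// but works on the raw annotation value so it needs no Kubernetes imports.
func pluginMigratedOnNode(pluginName, migratedPluginsAnnotation string) bool {
	if len(migratedPluginsAnnotation) == 0 {
		return false
	}
	migrated := map[string]bool{}
	for _, name := range strings.Split(migratedPluginsAnnotation, ",") {
		migrated[name] = true
	}
	return migrated[pluginName]
}

func main() {
	// Example annotation value; the real one comes from the CSINode object.
	ann := "kubernetes.io/gce-pd,kubernetes.io/aws-ebs"
	fmt.Println(pluginMigratedOnNode("kubernetes.io/gce-pd", ann)) // true
	fmt.Println(pluginMigratedOnNode("kubernetes.io/cinder", ann)) // false
}

As the tryTranslatePVToCSI code above shows, a PV is only rewritten when all four gates pass: the PV is migratable, the CSIMigration feature gate is on, the per-plugin migration gate is on, and the node advertises the plugin as migrated.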
@@ -93,7 +94,7 @@ func (c *podBindingCache) DeleteBindings(pod *v1.Pod) { if _, ok := c.bindingDecisions[podName]; ok { delete(c.bindingDecisions, podName) - VolumeBindingRequestSchedulerBinderCache.WithLabelValues("delete").Inc() + metrics.VolumeBindingRequestSchedulerBinderCache.WithLabelValues("delete").Inc() } } @@ -113,7 +114,7 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b bindings: bindings, provisionings: pvcs, } - VolumeBindingRequestSchedulerBinderCache.WithLabelValues("add").Inc() + metrics.VolumeBindingRequestSchedulerBinderCache.WithLabelValues("add").Inc() } else { decision.bindings = bindings decision.provisionings = pvcs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS index 102750c4593..d9b6fb8c6a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS @@ -23,7 +23,5 @@ reviewers: - david-mcmahon - mfojtik - therc -- lixiaobing10051267 -- goltermann - andrewsykim - mcrute diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD index 8ba6f1e8fe5..f83326497c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD @@ -12,9 +12,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider/aws", deps = [ "//pkg/credentialprovider:go_default_library", - "//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/OWNERS index 7b1a9c5feed..b8c5ea16b83 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/OWNERS @@ -4,6 +4,4 @@ reviewers: - justinsb - david-mcmahon - therc -- lixiaobing10051267 -- goltermann - chrislovecnm diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go index a650a7da50b..82a990dc4a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go @@ -33,9 +33,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" + "k8s.io/component-base/version" "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" - "k8s.io/kubernetes/pkg/version" ) var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr\.ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.amazonaws\.com(\.cn)?$`) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go index a8604b0e261..12a2ad3f3b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go @@ -68,7 +68,6 @@ type acrAuthResponse struct { } // 5 minutes buffer time to allow timeshift between local machine and AAD -const timeShiftBuffer = 300 const userAgentHeader = "User-Agent" const userAgent = "kubernetes-credentialprovider-acr" @@ -92,22 +91,22 @@ func receiveChallengeFromLoginServer(serverAddress string) (*authDirective, erro var challenge *http.Response if challenge, err = client.Do(r); err != nil { - return nil, fmt.Errorf("Error reaching registry endpoint %s, error: %s", challengeURL.String(), err) + return nil, fmt.Errorf("error reaching registry endpoint %s, error: %s", challengeURL.String(), err) } defer challenge.Body.Close() if challenge.StatusCode != 401 { - return nil, fmt.Errorf("Registry did not issue a valid AAD challenge, status: %d", challenge.StatusCode) + return nil, fmt.Errorf("registry did not issue a valid AAD challenge, status: %d", challenge.StatusCode) } var authHeader []string var ok bool if authHeader, ok = challenge.Header["Www-Authenticate"]; !ok { - return nil, fmt.Errorf("Challenge response does not contain header 'Www-Authenticate'") + return nil, fmt.Errorf("challenge response does not contain header 'Www-Authenticate'") } if len(authHeader) != 1 { - return nil, fmt.Errorf("Registry did not issue a valid AAD challenge, authenticate header [%s]", + return nil, fmt.Errorf("registry did not issue a valid AAD challenge, authenticate header [%s]", strings.Join(authHeader, ", ")) } @@ -115,7 +114,7 @@ func receiveChallengeFromLoginServer(serverAddress string) (*authDirective, erro authType := strings.ToLower(authSections[0]) var authParams *map[string]string if authParams, err = parseAssignments(authSections[1]); err != nil { - return nil, fmt.Errorf("Unable to understand the contents of Www-Authenticate header %s", authSections[1]) + return nil, fmt.Errorf("unable to understand the contents of Www-Authenticate header %s", authSections[1]) } // verify headers diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go index 729483a2918..8816f833e55 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -282,7 +282,20 @@ func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) { // decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a // username and a password. The format of the auth field is base64(:). 
func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { - decoded, err := base64.StdEncoding.DecodeString(field) + + var decoded []byte + + // StdEncoding can only decode padded string + // RawStdEncoding can only decode unpadded string + // a string is correctly padded if and only if its length is a multiple of 4 + if (len(field) % 4) == 0 { + // decode padded data + decoded, err = base64.StdEncoding.DecodeString(field) + } else { + // decode unpadded data + decoded, err = base64.RawStdEncoding.DecodeString(field) + } + if err != nil { return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/doc.go index fbad381e4ba..cab80bc43d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package gcp_credentials contains implementations of DockerConfigProvider +// Package gcp contains implementations of DockerConfigProvider // for Google Cloud Platform. -package gcp_credentials // import "k8s.io/kubernetes/pkg/credentialprovider/gcp" +package gcp // import "k8s.io/kubernetes/pkg/credentialprovider/gcp" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go index 1b5fbcdc779..0bae4fe213f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package gcp_credentials +package gcp import ( "encoding/json" @@ -31,14 +31,14 @@ import ( ) const ( - metadataUrl = "http://metadata.google.internal./computeMetadata/v1/" - metadataAttributes = metadataUrl + "instance/attributes/" + metadataURL = "http://metadata.google.internal./computeMetadata/v1/" + metadataAttributes = metadataURL + "instance/attributes/" dockerConfigKey = metadataAttributes + "google-dockercfg" - dockerConfigUrlKey = metadataAttributes + "google-dockercfg-url" - serviceAccounts = metadataUrl + "instance/service-accounts/" - metadataScopes = metadataUrl + "instance/service-accounts/default/scopes" - metadataToken = metadataUrl + "instance/service-accounts/default/token" - metadataEmail = metadataUrl + "instance/service-accounts/default/email" + dockerConfigURLKey = metadataAttributes + "google-dockercfg-url" + serviceAccounts = metadataURL + "instance/service-accounts/" + metadataScopes = metadataURL + "instance/service-accounts/default/scopes" + metadataToken = metadataURL + "instance/service-accounts/default/token" + metadataEmail = metadataURL + "instance/service-accounts/default/email" storageScopePrefix = "https://www.googleapis.com/auth/devstorage" cloudPlatformScopePrefix = "https://www.googleapis.com/auth/cloud-platform" defaultServiceAccount = "default/" @@ -70,7 +70,7 @@ type dockerConfigKeyProvider struct { // A DockerConfigProvider that reads its configuration from a URL read from // a specific Google Compute Engine metadata key: 'google-dockercfg-url'. 
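The decodeDockerConfigFieldAuth hunk above dispatches between base64.StdEncoding and base64.RawStdEncoding based on whether the field length is a multiple of 4. A compact standalone sketch of that dispatch (the helper name here is illustrative):

package main

import (
	"encoding/base64"
	"fmt"
)

// decodeAuthField mirrors the padded/unpadded dispatch added above: a base64
// string is correctly padded if and only if its length is a multiple of 4.
func decodeAuthField(field string) (string, error) {
	var (
		decoded []byte
		err     error
	)
	if len(field)%4 == 0 {
		decoded, err = base64.StdEncoding.DecodeString(field)
	} else {
		decoded, err = base64.RawStdEncoding.DecodeString(field)
	}
	if err != nil {
		return "", err
	}
	return string(decoded), nil
}

func main() {
	// "user:password" encoded with and without padding.
	for _, field := range []string{"dXNlcjpwYXNzd29yZA==", "dXNlcjpwYXNzd29yZA"} {
		out, err := decodeAuthField(field)
		fmt.Println(out, err) // user:password <nil> in both cases
	}
}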
-type dockerConfigUrlKeyProvider struct { +type dockerConfigURLKeyProvider struct { metadataProvider } @@ -100,7 +100,7 @@ func init() { credentialprovider.RegisterCredentialProvider("google-dockercfg-url", &credentialprovider.CachingDockerConfigProvider{ - Provider: &dockerConfigUrlKeyProvider{ + Provider: &dockerConfigURLKeyProvider{ metadataProvider{Client: httpClient}, }, Lifetime: 60 * time.Second, @@ -160,9 +160,9 @@ func (g *dockerConfigKeyProvider) Provide(image string) credentialprovider.Docke } // Provide implements DockerConfigProvider -func (g *dockerConfigUrlKeyProvider) Provide(image string) credentialprovider.DockerConfig { +func (g *dockerConfigURLKeyProvider) Provide(image string) credentialprovider.DockerConfig { // Read the contents of the google-dockercfg-url key and load a .dockercfg from there - if url, err := credentialprovider.ReadUrl(dockerConfigUrlKey, g.Client, metadataHeader); err != nil { + if url, err := credentialprovider.ReadUrl(dockerConfigURLKey, g.Client, metadataHeader); err != nil { klog.Errorf("while reading 'google-dockercfg-url' metadata: %v", err) } else { if strings.HasPrefix(string(url), "http") { @@ -268,7 +268,7 @@ type tokenBlob struct { func (g *containerRegistryProvider) Provide(image string) credentialprovider.DockerConfig { cfg := credentialprovider.DockerConfig{} - tokenJsonBlob, err := credentialprovider.ReadUrl(metadataToken, g.Client, metadataHeader) + tokenJSONBlob, err := credentialprovider.ReadUrl(metadataToken, g.Client, metadataHeader) if err != nil { klog.Errorf("while reading access token endpoint: %v", err) return cfg @@ -281,8 +281,8 @@ func (g *containerRegistryProvider) Provide(image string) credentialprovider.Doc } var parsedBlob tokenBlob - if err := json.Unmarshal([]byte(tokenJsonBlob), &parsedBlob); err != nil { - klog.Errorf("while parsing json blob %s: %v", tokenJsonBlob, err) + if err := json.Unmarshal([]byte(tokenJSONBlob), &parsedBlob); err != nil { + klog.Errorf("while parsing json blob %s: %v", tokenJSONBlob, err) return cfg } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index a4dfbdd879d..1cf35280df3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -83,14 +83,6 @@ const ( // the API server as the certificate approaches expiration. RotateKubeletClientCertificate featuregate.Feature = "RotateKubeletClientCertificate" - // owner: @msau42 - // alpha: v1.7 - // beta: v1.10 - // ga: v1.14 - // - // A new volume type that supports local disks on a node. - PersistentLocalVolumes featuregate.Feature = "PersistentLocalVolumes" - // owner: @jinxu // beta: v1.10 // @@ -120,7 +112,9 @@ const ( EphemeralContainers featuregate.Feature = "EphemeralContainers" // owner: @verb + // alpha: v1.10 // beta: v1.12 + // GA: v1.17 // // Allows all containers in a pod to share a process namespace. PodShareProcessNamespace featuregate.Feature = "PodShareProcessNamespace" @@ -135,6 +129,7 @@ const ( // owner: @k82cn // beta: v1.12 + // GA: v1.17 // // Taint nodes based on their condition status for 'NetworkUnavailable', // 'MemoryPressure', 'PIDPressure' and 'DiskPressure'. 
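The kube_features.go hunks above and below adjust the per-gate stage comments and, further down, the defaultKubernetesFeatureGates map. A minimal sketch of the component-base featuregate pattern these declarations follow, using an illustrative gate name rather than any real Kubernetes feature and assuming the featuregate.NewFeatureGate / Add / Set / Enabled API:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// ExampleFeature is an illustrative gate name, not one of the gates declared
// in kube_features.go.
const ExampleFeature featuregate.Feature = "ExampleFeature"

func main() {
	gate := featuregate.NewFeatureGate()
	// Registration mirrors the entries in defaultKubernetesFeatureGates.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		ExampleFeature: {Default: false, PreRelease: featuregate.Beta},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled(ExampleFeature)) // false: the declared default

	// The same override format accepted by --feature-gates on the command line.
	if err := gate.Set("ExampleFeature=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(ExampleFeature)) // true
}

The LockToDefault flag seen on the GA entries below pins a gate to its default, so later overrides of that gate are rejected.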
@@ -190,12 +185,6 @@ const ( // Enable nodes to exclude themselves from network disruption checks NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion" - // owner: @jsafrane - // alpha: v1.9 - // - // Enable running mount utilities in containers. - MountContainers featuregate.Feature = "MountContainers" - // owner: @saad-ali // alpha: v1.12 // beta: v1.14 @@ -205,6 +194,7 @@ const ( // owner: @verult // alpha: v1.12 // beta: v1.14 + // ga: v1.17 // Enable all logic related to the CSINode API object in storage.k8s.io CSINodeInfo featuregate.Feature = "CSINodeInfo" @@ -248,6 +238,7 @@ const ( // owner: @k82cn // beta: v1.12 + // GA: v1.17 // // Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller ScheduleDaemonSetPods featuregate.Feature = "ScheduleDaemonSetPods" @@ -293,6 +284,8 @@ const ( // owner: @gnufied // beta : v1.12 + // GA : v1.17 + // // Add support for volume plugins to report node specific // volume limits @@ -307,7 +300,9 @@ const ( BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes" // owner: @kevtaylor + // alpha: v1.14 // beta: v1.15 + // ga: v1.17 // // Allow subpath environment variable substitution // Only applicable if the VolumeSubpath feature is also enabled @@ -315,7 +310,7 @@ const ( // owner: @vikaschoudhary16 // beta: v1.12 - // + // ga: v1.17 // // Enable resource quota scope selectors ResourceQuotaScopeSelectors featuregate.Feature = "ResourceQuotaScopeSelectors" @@ -344,6 +339,7 @@ const ( // owner: @mtaufen // alpha: v1.12 // beta: v1.14 + // GA: v1.17 // // Kubelet uses the new Lease API to report node heartbeats, // (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. @@ -357,6 +353,7 @@ const ( // owner: @xing-yang // alpha: v1.12 + // beta: v1.17 // // Enable volume snapshot data source support. VolumeSnapshotDataSource featuregate.Feature = "VolumeSnapshotDataSource" @@ -382,34 +379,65 @@ const ( // owner: @davidz627 // alpha: v1.14 + // beta: v1.17 // // Enables the in-tree storage to CSI Plugin migration feature. CSIMigration featuregate.Feature = "CSIMigration" // owner: @davidz627 // alpha: v1.14 + // beta: v1.17 // // Enables the GCE PD in-tree driver to GCE CSI Driver migration feature. CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE" + // owner: @davidz627 + // alpha: v1.17 + // + // Disables the GCE PD in-tree driver. + // Expects GCE PD CSI Driver to be installed and configured on all nodes. + CSIMigrationGCEComplete featuregate.Feature = "CSIMigrationGCEComplete" + // owner: @leakingtapan // alpha: v1.14 + // beta: v1.17 // // Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature. CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS" + // owner: @leakingtapan + // alpha: v1.17 + // + // Disables the AWS EBS in-tree driver. + // Expects AWS EBS CSI Driver to be installed and configured on all nodes. + CSIMigrationAWSComplete featuregate.Feature = "CSIMigrationAWSComplete" + // owner: @andyzhangx // alpha: v1.15 // // Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature. CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk" + // owner: @andyzhangx + // alpha: v1.17 + // + // Disables the Azure Disk in-tree driver. + // Expects Azure Disk CSI Driver to be installed and configured on all nodes. 
+ CSIMigrationAzureDiskComplete featuregate.Feature = "CSIMigrationAzureDiskComplete" + // owner: @andyzhangx // alpha: v1.15 // // Enables the Azure File in-tree driver to Azure File Driver migration feature. CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile" + // owner: @andyzhangx + // alpha: v1.17 + // + // Disables the Azure File in-tree driver. + // Expects Azure File CSI Driver to be installed and configured on all nodes. + CSIMigrationAzureFileComplete featuregate.Feature = "CSIMigrationAzureFileComplete" + // owner: @RobertKrawitz // beta: v1.15 // @@ -425,6 +453,7 @@ const ( // owner: @bclau // alpha: v1.16 + // beta: v1.17 // // Enables support for running container entrypoints as different usernames than their default ones. WindowsRunAsUserName featuregate.Feature = "WindowsRunAsUserName" @@ -435,14 +464,17 @@ const ( // Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature. CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack" - // owner: @verult - // GA: v1.13 + // owner: @adisky + // alpha: v1.17 // - // Enables the regional PD feature on GCE. - deprecatedGCERegionalPersistentDisk featuregate.Feature = "GCERegionalPersistentDisk" + // Disables the OpenStack Cinder in-tree driver. + // Expects the OpenStack Cinder CSI Driver to be installed and configured on all nodes. + CSIMigrationOpenStackComplete featuregate.Feature = "CSIMigrationOpenStackComplete" // owner: @MrHohn - // beta: v1.16 + // alpha: v1.15 + // beta: v1.16 + // GA: v1.17 // // Enables Finalizer Protection for Service LoadBalancers. ServiceLoadBalancerFinalizer featuregate.Feature = "ServiceLoadBalancerFinalizer" @@ -497,12 +529,24 @@ const ( // Enables the startupProbe in kubelet worker. StartupProbe featuregate.Feature = "StartupProbe" - // owner @deads2k - // deprecated: v1.16 + // owner: @deads2k + // beta: v1.17 + // + // Enables the users to skip TLS verification of kubelets on pod logs requests + AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy" + + // owner: @mortent + // alpha: v1.3 + // beta: v1.5 // - // Enable the aggregated discovery timeout to ensure client responsiveness. Note this feature is present - // only for backward compatibility, it will be removed in the 1.17 release. 
- EnableAggregatedDiscoveryTimeout featuregate.Feature = "EnableAggregatedDiscoveryTimeout" + // Enable all logic related to the PodDisruptionBudget API object in policy + PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget" + + // owner: @m1093782566 + // alpha: v1.17 + // + // Enables topology aware service routing + ServiceTopology featuregate.Feature = "ServiceTopology" ) func init() { @@ -516,74 +560,79 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS AppArmor: {Default: true, PreRelease: featuregate.Beta}, DynamicKubeletConfig: {Default: true, PreRelease: featuregate.Beta}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta}, - DevicePlugins: {Default: true, PreRelease: featuregate.Beta}, - TaintBasedEvictions: {Default: true, PreRelease: featuregate.Beta}, - RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta}, - RotateKubeletClientCertificate: {Default: true, PreRelease: featuregate.Beta}, - PersistentLocalVolumes: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.17 - LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.Beta}, - Sysctls: {Default: true, PreRelease: featuregate.Beta}, - EphemeralContainers: {Default: false, PreRelease: featuregate.Alpha}, - PodShareProcessNamespace: {Default: true, PreRelease: featuregate.Beta}, - PodPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 - TaintNodesByCondition: {Default: true, PreRelease: featuregate.Beta}, - QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, - ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta}, - ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta}, - ExpandCSIVolumes: {Default: true, PreRelease: featuregate.Beta}, - AttachVolumeLimit: {Default: true, PreRelease: featuregate.Beta}, - CPUManager: {Default: true, PreRelease: featuregate.Beta}, - CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha}, - TopologyManager: {Default: false, PreRelease: featuregate.Alpha}, - ServiceNodeExclusion: {Default: false, PreRelease: featuregate.Alpha}, - NodeDisruptionExclusion: {Default: false, PreRelease: featuregate.Alpha}, - MountContainers: {Default: false, PreRelease: featuregate.Alpha}, - CSIDriverRegistry: {Default: true, PreRelease: featuregate.Beta}, - CSINodeInfo: {Default: true, PreRelease: featuregate.Beta}, - BlockVolume: {Default: true, PreRelease: featuregate.Beta}, - StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA}, - ResourceLimitsPriorityFunction: {Default: false, PreRelease: featuregate.Alpha}, - SupportIPVSProxyMode: {Default: true, PreRelease: featuregate.GA}, - SupportPodPidsLimit: {Default: true, PreRelease: featuregate.Beta}, - SupportNodePidsLimit: {Default: true, PreRelease: featuregate.Beta}, - HyperVContainer: {Default: false, PreRelease: featuregate.Alpha}, - ScheduleDaemonSetPods: {Default: true, PreRelease: featuregate.Beta}, - TokenRequest: {Default: true, PreRelease: featuregate.Beta}, - TokenRequestProjection: {Default: true, PreRelease: featuregate.Beta}, - BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha}, - CRIContainerLogRotation: {Default: true, PreRelease: featuregate.Beta}, - deprecatedGCERegionalPersistentDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.17 - CSIMigration: {Default: false, PreRelease: featuregate.Alpha}, - 
CSIMigrationGCE: {Default: false, PreRelease: featuregate.Alpha}, - CSIMigrationAWS: {Default: false, PreRelease: featuregate.Alpha}, - CSIMigrationAzureDisk: {Default: false, PreRelease: featuregate.Alpha}, - CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Alpha}, - RunAsGroup: {Default: true, PreRelease: featuregate.Beta}, - CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Alpha}, - VolumeSubpath: {Default: true, PreRelease: featuregate.GA}, - BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha}, - VolumeSubpathEnvExpansion: {Default: true, PreRelease: featuregate.Beta}, - ResourceQuotaScopeSelectors: {Default: true, PreRelease: featuregate.Beta}, - CSIBlockVolume: {Default: true, PreRelease: featuregate.Beta}, - CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta}, - RuntimeClass: {Default: true, PreRelease: featuregate.Beta}, - NodeLease: {Default: true, PreRelease: featuregate.Beta}, - SCTPSupport: {Default: false, PreRelease: featuregate.Alpha}, - VolumeSnapshotDataSource: {Default: false, PreRelease: featuregate.Alpha}, - ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, - TTLAfterFinished: {Default: false, PreRelease: featuregate.Alpha}, - KubeletPodResources: {Default: true, PreRelease: featuregate.Beta}, - WindowsGMSA: {Default: true, PreRelease: featuregate.Beta}, - WindowsRunAsUserName: {Default: false, PreRelease: featuregate.Alpha}, - ServiceLoadBalancerFinalizer: {Default: true, PreRelease: featuregate.Beta}, + DevicePlugins: {Default: true, PreRelease: featuregate.Beta}, + TaintBasedEvictions: {Default: true, PreRelease: featuregate.Beta}, + RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta}, + RotateKubeletClientCertificate: {Default: true, PreRelease: featuregate.Beta}, + LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.Beta}, + Sysctls: {Default: true, PreRelease: featuregate.Beta}, + EphemeralContainers: {Default: false, PreRelease: featuregate.Alpha}, + PodShareProcessNamespace: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19 + PodPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 + TaintNodesByCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 + QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, + ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta}, + ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta}, + ExpandCSIVolumes: {Default: true, PreRelease: featuregate.Beta}, + AttachVolumeLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19 + CPUManager: {Default: true, PreRelease: featuregate.Beta}, + CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha}, + TopologyManager: {Default: false, PreRelease: featuregate.Alpha}, + ServiceNodeExclusion: {Default: false, PreRelease: featuregate.Alpha}, + NodeDisruptionExclusion: {Default: false, PreRelease: featuregate.Alpha}, + CSIDriverRegistry: {Default: true, PreRelease: featuregate.Beta}, + CSINodeInfo: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19 + BlockVolume: {Default: true, PreRelease: featuregate.Beta}, + StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: featuregate.Alpha}, + SupportIPVSProxyMode: {Default: true, PreRelease: featuregate.GA}, + 
SupportPodPidsLimit: {Default: true, PreRelease: featuregate.Beta}, + SupportNodePidsLimit: {Default: true, PreRelease: featuregate.Beta}, + HyperVContainer: {Default: false, PreRelease: featuregate.Alpha}, + ScheduleDaemonSetPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 + TokenRequest: {Default: true, PreRelease: featuregate.Beta}, + TokenRequestProjection: {Default: true, PreRelease: featuregate.Beta}, + BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha}, + CRIContainerLogRotation: {Default: true, PreRelease: featuregate.Beta}, + CSIMigration: {Default: true, PreRelease: featuregate.Beta}, + CSIMigrationGCE: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires GCE PD CSI Driver) + CSIMigrationGCEComplete: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationAWS: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires AWS EBS CSI driver) + CSIMigrationAWSComplete: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationAzureDisk: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationAzureDiskComplete: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationAzureFileComplete: {Default: false, PreRelease: featuregate.Alpha}, + RunAsGroup: {Default: true, PreRelease: featuregate.Beta}, + CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationOpenStackComplete: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSubpath: {Default: true, PreRelease: featuregate.GA}, + BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSubpathEnvExpansion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19, + ResourceQuotaScopeSelectors: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 + CSIBlockVolume: {Default: true, PreRelease: featuregate.Beta}, + CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta}, + RuntimeClass: {Default: true, PreRelease: featuregate.Beta}, + NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + SCTPSupport: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSnapshotDataSource: {Default: true, PreRelease: featuregate.Beta}, + ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, + TTLAfterFinished: {Default: false, PreRelease: featuregate.Alpha}, + KubeletPodResources: {Default: true, PreRelease: featuregate.Beta}, + WindowsGMSA: {Default: true, PreRelease: featuregate.Beta}, + WindowsRunAsUserName: {Default: true, PreRelease: featuregate.Beta}, + ServiceLoadBalancerFinalizer: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha}, NonPreemptingPriority: {Default: false, PreRelease: featuregate.Alpha}, VolumePVCDataSource: {Default: true, PreRelease: featuregate.Beta}, PodOverhead: {Default: false, PreRelease: featuregate.Alpha}, IPv6DualStack: {Default: false, PreRelease: featuregate.Alpha}, - EndpointSlice: {Default: false, PreRelease: featuregate.Alpha}, + EndpointSlice: {Default: false, PreRelease: featuregate.Beta}, EvenPodsSpread: {Default: false, PreRelease: featuregate.Alpha}, StartupProbe: {Default: false, PreRelease: featuregate.Alpha}, + AllowInsecureBackendProxy: {Default: true, PreRelease: featuregate.Beta}, + PodDisruptionBudget: {Default: true, PreRelease: 
featuregate.Beta}, + ServiceTopology: {Default: false, PreRelease: featuregate.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -595,7 +644,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta}, genericfeatures.DryRun: {Default: true, PreRelease: featuregate.Beta}, genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.RequestManagement: {Default: false, PreRelease: featuregate.Alpha}, + genericfeatures.APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -603,9 +652,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, apiextensionsfeatures.CustomResourceWebhookConversion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, apiextensionsfeatures.CustomResourcePublishOpenAPI: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - apiextensionsfeatures.CustomResourceDefaulting: {Default: true, PreRelease: featuregate.Beta}, - - EnableAggregatedDiscoveryTimeout: {Default: true, PreRelease: featuregate.Deprecated}, + apiextensionsfeatures.CustomResourceDefaulting: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // TODO: remove in 1.18 // features that enable backwards compatibility but are scheduled to be removed // ... diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD index 4aa528ec1fd..f942cf4c2aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD @@ -18,6 +18,8 @@ go_library( "kubelet_network_linux.go", "kubelet_network_others.go", "kubelet_node_status.go", + "kubelet_node_status_others.go", + "kubelet_node_status_windows.go", "kubelet_pods.go", "kubelet_resources.go", "kubelet_volumes.go", @@ -42,7 +44,6 @@ go_library( "//pkg/fieldpath:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/apis/podresources:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/certificate:go_default_library", @@ -64,7 +65,6 @@ go_library( "//pkg/kubelet/logs:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/metrics/collectors:go_default_library", - "//pkg/kubelet/mountpod:go_default_library", "//pkg/kubelet/network/dns:go_default_library", "//pkg/kubelet/nodelease:go_default_library", "//pkg/kubelet/nodestatus:go_default_library", @@ -96,13 +96,10 @@ go_library( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", - "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/sysctl:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/iptables:go_default_library", - "//pkg/util/mount:go_default_library", 
"//pkg/util/node:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/removeall:go_default_library", @@ -111,7 +108,6 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/csi:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/exec:go_default_library", "//pkg/volume/util/hostutil:go_default_library", "//pkg/volume/util/subpath:go_default_library", "//pkg/volume/util/types:go_default_library", @@ -145,14 +141,22 @@ go_library( "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cri-api/pkg/apis:go_default_library", "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//third_party/forked/golang/expansion:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/integer:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", "//vendor/k8s.io/utils/path:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/kubelet/winstats:go_default_library", + ], + "//conditions:default": [], + }), ) go_test( @@ -209,11 +213,8 @@ go_test( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", - "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/taints:go_default_library", - "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", @@ -242,18 +243,23 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//staging/src/k8s.io/component-base/featuregate:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", ], @@ -295,7 +301,6 @@ filegroup( "//pkg/kubelet/lifecycle:all-srcs", "//pkg/kubelet/logs:all-srcs", "//pkg/kubelet/metrics:all-srcs", - "//pkg/kubelet/mountpod:all-srcs", 
"//pkg/kubelet/network:all-srcs", "//pkg/kubelet/nodelease:all-srcs", "//pkg/kubelet/nodestatus:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD index 686b93bc7b6..38505ce069e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD @@ -37,11 +37,6 @@ filegroup( srcs = [ ":package-srcs", "//pkg/kubelet/apis/config:all-srcs", - "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs", - "//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs", - "//pkg/kubelet/apis/pluginregistration/v1:all-srcs", - "//pkg/kubelet/apis/pluginregistration/v1alpha1:all-srcs", - "//pkg/kubelet/apis/pluginregistration/v1beta1:all-srcs", "//pkg/kubelet/apis/podresources:all-srcs", "//pkg/kubelet/apis/resourcemetrics/v1alpha1:all-srcs", "//pkg/kubelet/apis/stats/v1alpha1:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/scheme/scheme.go index 2c7c3e51a13..0a5ae0b5eea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/scheme/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/scheme/scheme.go @@ -26,8 +26,9 @@ import ( // Utility functions for the Kubelet's kubeletconfig API group // NewSchemeAndCodecs is a utility function that returns a Scheme and CodecFactory -// that understand the types in the kubeletconfig API group. -func NewSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) { +// that understand the types in the kubeletconfig API group. Passing mutators allows +// for adjusting the behavior of the CodecFactory, for example enable strict decoding. +func NewSchemeAndCodecs(mutators ...serializer.CodecFactoryOptionsMutator) (*runtime.Scheme, *serializer.CodecFactory, error) { scheme := runtime.NewScheme() if err := kubeletconfig.AddToScheme(scheme); err != nil { return nil, nil, err @@ -35,6 +36,6 @@ func NewSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) { if err := kubeletconfigv1beta1.AddToScheme(scheme); err != nil { return nil, nil, err } - codecs := serializer.NewCodecFactory(scheme) + codecs := serializer.NewCodecFactory(scheme, mutators...) return scheme, &codecs, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go index 446ad78326c..ddb8a3e0eba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -331,6 +331,10 @@ type KubeletConfiguration struct { // This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`. // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. EnforceNodeAllocatable []string + // This option specifies the cpu list reserved for the host level system threads and kubernetes related threads. + // This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved. + // This option overwrites CPUs provided by system-reserved and kube-reserved. 
+ ReservedSystemCPUs string } type KubeletAuthorizationMode string diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go index 170b6ac8bda..61daafc113d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go @@ -112,7 +112,7 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura // set to NodeStatusUpdateFrequency if NodeStatusUpdateFrequency is set // explicitly. if obj.NodeStatusUpdateFrequency == zeroDuration { - obj.NodeStatusReportFrequency = metav1.Duration{Duration: time.Minute} + obj.NodeStatusReportFrequency = metav1.Duration{Duration: 5 * time.Minute} } else { obj.NodeStatusReportFrequency = obj.NodeStatusUpdateFrequency } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go index cb761146ea9..c478e06fcf1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -326,6 +326,7 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in out.ConfigMapAndSecretChangeDetectionStrategy = config.ResourceChangeDetectionStrategy(in.ConfigMapAndSecretChangeDetectionStrategy) out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) + out.ReservedSystemCPUs = in.ReservedSystemCPUs out.SystemReservedCgroup = in.SystemReservedCgroup out.KubeReservedCgroup = in.KubeReservedCgroup out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) @@ -462,6 +463,7 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in out.SystemReservedCgroup = in.SystemReservedCgroup out.KubeReservedCgroup = in.KubeReservedCgroup out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) + out.ReservedSystemCPUs = in.ReservedSystemCPUs return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/BUILD index f7348901d4e..c50698e5aa0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/BUILD @@ -17,6 +17,7 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/apis/config:go_default_library", + "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go index 31c6cedda28..877ad7c7d29 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go @@ -25,6 +25,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -142,6 +143,15 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error allErrors = append(allErrors, fmt.Errorf("invalid configuration: option %q specified for HairpinMode (--hairpin-mode). Valid options are %q, %q or %q", kc.HairpinMode, kubeletconfig.HairpinNone, kubeletconfig.HairpinVeth, kubeletconfig.PromiscuousBridge)) } + if kc.ReservedSystemCPUs != "" { + // --reserved-cpus does not support --system-reserved-cgroup or --kube-reserved-cgroup + if kc.SystemReservedCgroup != "" || kc.KubeReservedCgroup != "" { + allErrors = append(allErrors, fmt.Errorf("can't use --reserved-cpus with --system-reserved-cgroup or --kube-reserved-cgroup")) + } + if _, err := cpuset.Parse(kc.ReservedSystemCPUs); err != nil { + allErrors = append(allErrors, fmt.Errorf("unable to parse --reserved-cpus, error: %v", err)) + } + } if err := validateKubeletOSConfiguration(kc); err != nil { allErrors = append(allErrors, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/BUILD index 3addd5c1877..fc194ac2ba1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/BUILD @@ -8,6 +8,7 @@ go_library( deps = [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/server/stats:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/config.go index b0697dec261..38e5a17f0b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1/config.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "time" + "k8s.io/component-base/metrics" summary "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/server/stats" ) @@ -26,55 +27,93 @@ import ( // Version is the string representation of the version of this configuration const Version = "v1alpha1" +var ( + nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds_total", + "Cumulative cpu time consumed by the node in core-seconds", + nil, + nil, + metrics.ALPHA, + "") + + nodeMemoryUsageDesc = metrics.NewDesc("node_memory_working_set_bytes", + "Current working set of the node in bytes", + nil, + nil, + metrics.ALPHA, + "") + + containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds_total", + "Cumulative cpu time consumed by the container in core-seconds", + []string{"container", "pod", "namespace"}, + nil, + metrics.ALPHA, + "") + + containerMemoryUsageDesc = metrics.NewDesc("container_memory_working_set_bytes", + "Current working set of the container in bytes", + []string{"container", "pod", "namespace"}, + nil, + metrics.ALPHA, + "") +) + +// getNodeCPUMetrics returns CPU utilization of a node. 
+func getNodeCPUMetrics(s summary.NodeStats) (*float64, time.Time) { + if s.CPU == nil { + return nil, time.Time{} + } + v := float64(*s.CPU.UsageCoreNanoSeconds) / float64(time.Second) + return &v, s.CPU.Time.Time +} + +// getNodeMemoryMetrics returns memory utilization of a node. +func getNodeMemoryMetrics(s summary.NodeStats) (*float64, time.Time) { + if s.Memory == nil { + return nil, time.Time{} + } + v := float64(*s.Memory.WorkingSetBytes) + return &v, s.Memory.Time.Time +} + +// getContainerCPUMetrics returns CPU utilization of a container. +func getContainerCPUMetrics(s summary.ContainerStats) (*float64, time.Time) { + if s.CPU == nil { + return nil, time.Time{} + } + v := float64(*s.CPU.UsageCoreNanoSeconds) / float64(time.Second) + return &v, s.CPU.Time.Time +} + +// getContainerMemoryMetrics returns memory utilization of a container. +func getContainerMemoryMetrics(s summary.ContainerStats) (*float64, time.Time) { + if s.Memory == nil { + return nil, time.Time{} + } + v := float64(*s.Memory.WorkingSetBytes) + return &v, s.Memory.Time.Time +} + // Config is the v1alpha1 resource metrics definition func Config() stats.ResourceMetricsConfig { return stats.ResourceMetricsConfig{ NodeMetrics: []stats.NodeResourceMetric{ { - Name: "node_cpu_usage_seconds_total", - Description: "Cumulative cpu time consumed by the node in core-seconds", - ValueFn: func(s summary.NodeStats) (*float64, time.Time) { - if s.CPU == nil { - return nil, time.Time{} - } - v := float64(*s.CPU.UsageCoreNanoSeconds) / float64(time.Second) - return &v, s.CPU.Time.Time - }, + Desc: nodeCPUUsageDesc, + ValueFn: getNodeCPUMetrics, }, { - Name: "node_memory_working_set_bytes", - Description: "Current working set of the node in bytes", - ValueFn: func(s summary.NodeStats) (*float64, time.Time) { - if s.Memory == nil { - return nil, time.Time{} - } - v := float64(*s.Memory.WorkingSetBytes) - return &v, s.Memory.Time.Time - }, + Desc: nodeMemoryUsageDesc, + ValueFn: getNodeMemoryMetrics, }, }, ContainerMetrics: []stats.ContainerResourceMetric{ { - Name: "container_cpu_usage_seconds_total", - Description: "Cumulative cpu time consumed by the container in core-seconds", - ValueFn: func(s summary.ContainerStats) (*float64, time.Time) { - if s.CPU == nil { - return nil, time.Time{} - } - v := float64(*s.CPU.UsageCoreNanoSeconds) / float64(time.Second) - return &v, s.CPU.Time.Time - }, + Desc: containerCPUUsageDesc, + ValueFn: getContainerCPUMetrics, }, { - Name: "container_memory_working_set_bytes", - Description: "Current working set of the container in bytes", - ValueFn: func(s summary.ContainerStats) (*float64, time.Time) { - if s.Memory == nil { - return nil, time.Time{} - } - v := float64(*s.Memory.WorkingSetBytes) - return &v, s.Memory.Time.Time - }, + Desc: containerMemoryUsageDesc, + ValueFn: getContainerMemoryMetrics, }, }, } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go index b473e524956..a38ad13b545 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go @@ -34,28 +34,21 @@ const ( // and GA labels to ensure backward compatibility. // TODO: stop applying the beta Arch labels in Kubernetes 1.18. LabelArch = "beta.kubernetes.io/arch" - - // GA versions of the legacy beta labels. 
- // TODO: update kubelet and controllers to set both beta and GA labels, then export these constants - labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone" - labelZoneRegionGA = "failure-domain.kubernetes.io/region" - labelInstanceTypeGA = "kubernetes.io/instance-type" ) var kubeletLabels = sets.NewString( v1.LabelHostname, + v1.LabelZoneFailureDomainStable, + v1.LabelZoneRegionStable, v1.LabelZoneFailureDomain, v1.LabelZoneRegion, v1.LabelInstanceType, + v1.LabelInstanceTypeStable, v1.LabelOSStable, v1.LabelArchStable, LabelOS, LabelArch, - - labelZoneFailureDomainGA, - labelZoneRegionGA, - labelInstanceTypeGA, ) var kubeletLabelNamespaces = sets.NewString( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD index 8e0d150eeb0..f3d64675cee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD @@ -28,6 +28,21 @@ go_library( "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", + "//vendor/github.com/google/cadvisor/container:go_default_library", + "//vendor/github.com/google/cadvisor/container/containerd/install:go_default_library", + "//vendor/github.com/google/cadvisor/container/crio/install:go_default_library", + "//vendor/github.com/google/cadvisor/container/docker/install:go_default_library", + "//vendor/github.com/google/cadvisor/container/systemd/install:go_default_library", + "//vendor/github.com/google/cadvisor/fs:go_default_library", + "//vendor/github.com/google/cadvisor/manager:go_default_library", + "//vendor/github.com/google/cadvisor/utils/cloudinfo/aws:go_default_library", + "//vendor/github.com/google/cadvisor/utils/cloudinfo/azure:go_default_library", + "//vendor/github.com/google/cadvisor/utils/cloudinfo/gce:go_default_library", + "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", "//vendor/github.com/google/cadvisor/container:go_default_library", @@ -55,6 +70,13 @@ go_test( srcs = ["util_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/github.com/google/cadvisor/container/crio:go_default_library", + "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go index 23b26d7084b..c78e882ca1e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -1,4 +1,4 @@ -// +build cgo,linux +// +build linux /* Copyright 2015 The 
Kubernetes Authors. @@ -94,6 +94,7 @@ func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots [ cadvisormetrics.NetworkUsageMetrics: struct{}{}, cadvisormetrics.AcceleratorUsageMetrics: struct{}{}, cadvisormetrics.AppMetrics: struct{}{}, + cadvisormetrics.ProcessMetrics: struct{}{}, } if usingLegacyStats { includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go index 2d1642b8323..4fd923e9fe7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows linux,!cgo +// +build !linux,!windows /* Copyright 2015 The Kubernetes Authors. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go index ae69406b7a1..13b0c235b48 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go @@ -1,4 +1,4 @@ -// +build cgo,linux +// +build linux /* Copyright 2017 The Kubernetes Authors. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_unsupported.go index af90f150c16..a8cd6aeec64 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux linux,!cgo +// +build !linux /* Copyright 2017 The Kubernetes Authors. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go index 3e579e6bf63..7bdc42b3cbe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -356,7 +356,7 @@ func requestNodeCertificate(client certificatesv1beta1.CertificateSigningRequest } // This digest should include all the relevant pieces of the CSR we care about. -// We can't direcly hash the serialized CSR because of random padding that we +// We can't directly hash the serialized CSR because of random padding that we // regenerate every loop and we include usages which are not contained in the // CSR. This needs to be kept up to date as we add new fields to the node // certificates and with ensureCompatible. 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go index cea55f0c34e..31e77108ce0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go @@ -25,7 +25,7 @@ import ( "sort" certificates "k8s.io/api/certificates/v1beta1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" @@ -52,17 +52,38 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg if err != nil { return nil, fmt.Errorf("failed to initialize server certificate store: %v", err) } - var certificateExpiration = compbasemetrics.NewGauge( + certificateExpiration := compbasemetrics.NewGauge( &compbasemetrics.GaugeOpts{ - Namespace: metrics.KubeletSubsystem, - Subsystem: "certificate_manager", - Name: "server_expiration_seconds", + Subsystem: metrics.KubeletSubsystem, + Name: "certificate_manager_server_expiration_seconds", Help: "Gauge of the lifetime of a certificate. The value is the date the certificate will expire in seconds since January 1, 1970 UTC.", StabilityLevel: compbasemetrics.ALPHA, }, ) legacyregistry.MustRegister(certificateExpiration) + certificateRotationAge := compbasemetrics.NewHistogram( + &compbasemetrics.HistogramOpts{ + Subsystem: metrics.KubeletSubsystem, + Name: "certificate_manager_server_rotation_seconds", + Help: "Histogram of the number of seconds the previous certificate lived before being rotated.", + Buckets: []float64{ + 60, // 1 minute + 3600, // 1 hour + 14400, // 4 hours + 86400, // 1 day + 604800, // 1 week + 2592000, // 1 month + 7776000, // 3 months + 15552000, // 6 months + 31104000, // 1 year + 124416000, // 4 years + }, + StabilityLevel: compbasemetrics.ALPHA, + }, + ) + legacyregistry.MustRegister(certificateRotationAge) + getTemplate := func() *x509.CertificateRequest { hostnames, ips := addressesToHostnamesAndIPs(getAddresses()) // don't return a template if we have no addresses to request for @@ -100,6 +121,7 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg }, CertificateStore: certificateStore, CertificateExpiration: certificateExpiration, + CertificateRotation: certificateRotationAge, }) if err != nil { return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD index 96aaa1996da..7b57d09c324 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/cm/cpumanager:go_default_library", + "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/cm/topologymanager:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -45,24 +46,54 @@ go_library( "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ + "//pkg/api/v1/resource:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", 
"//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", + "//pkg/kubelet/cm/cpumanager/topology:go_default_library", + "//pkg/kubelet/cm/devicemanager:go_default_library", + "//pkg/kubelet/cm/util:go_default_library", + "//pkg/kubelet/events:go_default_library", + "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/qos:go_default_library", + "//pkg/kubelet/stats/pidlimit:go_default_library", + "//pkg/kubelet/types:go_default_library", + "//pkg/util/oom:go_default_library", + "//pkg/util/procfs:go_default_library", + "//pkg/util/sysctl:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/github.com/docker/go-units:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/utils/io:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/api/v1/resource:go_default_library", @@ -77,7 +108,6 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/stats/pidlimit:go_default_library", "//pkg/kubelet/types:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", "//pkg/util/sysctl:go_default_library", @@ -92,37 +122,38 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", "//vendor/k8s.io/utils/io:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/path:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], 
"@io_bazel_rules_go//go/platform:netbsd": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:plan9": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/cadvisor:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), @@ -140,10 +171,21 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/features:go_default_library", + "//pkg/kubelet/eviction/api:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/features:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -151,6 +193,7 @@ go_test( "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index a653e67c790..2e3968f4feb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -153,7 +153,7 @@ func (l *libcontainerAdapter) newManager(cgroups *libcontainerconfigs.Cgroup, pa if !cgroupsystemd.UseSystemd() { panic("systemd cgroup manager not available") } - return &cgroupsystemd.Manager{ + return &cgroupsystemd.LegacyManager{ Cgroups: cgroups, Paths: paths, }, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index 40ed8bd21ec..b91102f6a5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -137,6 +138,7 @@ type NodeConfig struct { type NodeAllocatableConfig struct { KubeReservedCgroupName string SystemReservedCgroupName string + ReservedSystemCPUs cpuset.CPUSet EnforceNodeAllocatable sets.String KubeReserved v1.ResourceList SystemReserved v1.ResourceList diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index 1baa280768f..81d3a015639 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -33,6 +33,9 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" "k8s.io/klog" + utilio "k8s.io/utils/io" + "k8s.io/utils/mount" + utilpath "k8s.io/utils/path" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -59,12 +62,9 @@ import ( "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" "k8s.io/kubernetes/pkg/kubelet/status" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" - utilio "k8s.io/utils/io" - utilpath "k8s.io/utils/path" ) const ( @@ -297,7 +297,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I return nil, err } - klog.Infof("[topologymanager] Initilizing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy) + klog.Infof("[topologymanager] Initializing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy) } else { cm.topologyManager = topologymanager.NewFakeManager() } @@ -320,6 +320,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I nodeConfig.ExperimentalCPUManagerReconcilePeriod, machineInfo, numaNodeInfo, + nodeConfig.NodeAllocatableConfig.ReservedSystemCPUs, cm.GetNodeAllocatableReservation(), nodeConfig.KubeletRootDir, cm.topologyManager, @@ -573,7 +574,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, // Initialize CPU manager if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) { - cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), podStatusProvider, runtimeService) + cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService) } // cache the node Info including resource capacity and diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go index 2e02cf5d20a..d59afa81e50 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go @@ -21,13 +21,14 @@ package cm import ( "fmt" - "k8s.io/api/core/v1" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/kubernetes/pkg/util/mount" ) type unsupportedContainerManager struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go index 55f172171e6..e4a88b86b9b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go @@ -24,12 +24,14 @@ package cm import ( "fmt" + "k8s.io/klog" + "k8s.io/utils/mount" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" internalapi "k8s.io/cri-api/pkg/apis" - "k8s.io/klog" kubefeatures "k8s.io/kubernetes/pkg/features" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" @@ -41,7 +43,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" "k8s.io/kubernetes/pkg/kubelet/status" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - "k8s.io/kubernetes/pkg/util/mount" ) type containerManagerImpl struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD index 24cf06638f0..e63627c59df 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD @@ -10,7 +10,6 @@ go_library( "policy.go", "policy_none.go", "policy_static.go", - "topology_hints.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", visibility = ["//visibility:public"], @@ -20,7 +19,8 @@ go_library( "//pkg/kubelet/cm/cpumanager/topology:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/cm/topologymanager:go_default_library", - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", + "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/status:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -48,7 +48,7 @@ go_test( "//pkg/kubelet/cm/cpumanager/topology:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/cm/topologymanager:go_default_library", - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/OWNERS index 77f30f6d64b..c0aaa5bbdc4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/OWNERS +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/OWNERS @@ -6,3 +6,5 @@ approvers: - ConnorDoyle - sjenning - balajismaniam +reviewers: +- klueska diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index 877b71dd353..d93f0db3c24 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -23,7 +23,7 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/status" ) @@ -51,7 +52,7 @@ const cpuManagerStateFileName = "cpu_manager_state" // Manager interface provides methods for Kubelet to manage pod cpus. type Manager interface { // Start is called during Kubelet initialization. - Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) + Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) // AddContainer is called between container create and container start // so that initial CPU affinity settings can be written through to the @@ -66,9 +67,10 @@ type Manager interface { // State returns a read-only interface to the internal CPU manager state. State() state.Reader - // GetTopologyHints implements the Topology Manager Interface and is - // consulted to make Topology aware resource alignments - GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint + // GetTopologyHints implements the topologymanager.HintProvider Interface + // and is consulted to achieve NUMA aware resource alignment among this + // and other resource controllers. + GetTopologyHints(v1.Pod, v1.Container) map[string][]topologymanager.TopologyHint } type manager struct { @@ -97,12 +99,21 @@ type manager struct { topology *topology.CPUTopology nodeAllocatableReservation v1.ResourceList + + // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. + // We use it to determine when we can purge inactive pods from checkpointed state. 
+ sourcesReady config.SourcesReady } var _ Manager = &manager{} +type sourcesReadyStub struct{} + +func (s *sourcesReadyStub) AddSource(source string) {} +func (s *sourcesReadyStub) AllReady() bool { return true } + // NewManager creates new cpu manager based on provided policy -func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, numaNodeInfo topology.NUMANodeInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) { +func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, numaNodeInfo topology.NUMANodeInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) { var topo *topology.CPUTopology var policy Policy @@ -136,7 +147,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo // exclusively allocated. reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000 numReservedCPUs := int(math.Ceil(reservedCPUsFloat)) - policy = NewStaticPolicy(topo, numReservedCPUs, affinity) + policy = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity) default: return nil, fmt.Errorf("unknown policy: \"%s\"", cpuPolicyName) @@ -154,13 +165,14 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo topology: topo, nodeAllocatableReservation: nodeAllocatableReservation, } + manager.sourcesReady = &sourcesReadyStub{} return manager, nil } -func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { +func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { klog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) klog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) - + m.sourcesReady = sourcesReady m.activePods = activePods m.podStatusProvider = podStatusProvider m.containerRuntime = containerRuntime @@ -216,18 +228,81 @@ func (m *manager) State() state.Reader { return m.state } +func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { + // Garbage collect any stranded resources before providing TopologyHints + m.removeStaleState() + // Delegate to active policy + return m.policy.GetTopologyHints(m.state, pod, container) +} + type reconciledContainer struct { podName string containerName string containerID string } +func (m *manager) removeStaleState() { + // Only once all sources are ready do we attempt to remove any stale state. + // This ensures that the call to `m.activePods()` below will succeed with + // the actual active pods list. + if !m.sourcesReady.AllReady() { + return + } + + // We grab the lock to ensure that no new containers will grab CPUs while + // executing the code below. Without this lock, its possible that we end up + // removing state that is newly added by an asynchronous call to + // AddContainer() during the execution of this code. + m.Lock() + defer m.Unlock() + + // We remove stale state very conservatively, only removing *any* state + // once we know for sure that we wont be accidentally removing state that + // is still valid. Since this function is called periodically, we will just + // try again next time this function is called. 
+ activePods := m.activePods() + if len(activePods) == 0 { + // If there are no active pods, skip the removal of stale state. + return + } + + // Build a list of containerIDs for all containers in all active Pods. + activeContainers := make(map[string]struct{}) + for _, pod := range activePods { + pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID) + if !ok { + // If even one pod does not have it's status set, skip state removal. + return + } + for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { + containerID, err := findContainerIDByName(&pstatus, container.Name) + if err != nil { + // If even one container does not have it's containerID set, skip state removal. + return + } + activeContainers[containerID] = struct{}{} + } + } + + // Loop through the CPUManager state. Remove any state for containers not + // in the `activeContainers` list built above. The shortcircuits in place + // above ensure that no erroneous state will ever be removed. + for containerID := range m.state.GetCPUAssignments() { + if _, ok := activeContainers[containerID]; !ok { + klog.Errorf("[cpumanager] removeStaleState: removing container: %s)", containerID) + err := m.policy.RemoveContainer(m.state, containerID) + if err != nil { + klog.Errorf("[cpumanager] removeStaleState: failed to remove container %s, error: %v)", containerID, err) + } + } + } +} + func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) { success = []reconciledContainer{} failure = []reconciledContainer{} - activeContainers := make(map[string]*v1.Pod) - + m.removeStaleState() for _, pod := range m.activePods() { allContainers := pod.Spec.InitContainers allContainers = append(allContainers, pod.Spec.Containers...) @@ -266,8 +341,6 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec } } - activeContainers[containerID] = pod - cset := m.state.GetCPUSetOrDefault(containerID) if cset.IsEmpty() { // NOTE: This should not happen outside of tests. 
@@ -286,16 +359,6 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec success = append(success, reconciledContainer{pod.Name, container.Name, containerID}) } } - - for containerID := range m.state.GetCPUAssignments() { - if pod, ok := activeContainers[containerID]; !ok { - err := m.RemoveContainer(containerID) - if err != nil { - klog.Errorf("[cpumanager] reconcileState: failed to remove container (pod: %s, container id: %s, error: %v)", pod.Name, containerID, err) - failure = append(failure, reconciledContainer{pod.Name, "", containerID}) - } - } - } return success, failure } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index 0a2c684bcf3..94ef3dc83e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -21,6 +21,7 @@ import ( "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/status" ) @@ -28,7 +29,7 @@ type fakeManager struct { state state.State } -func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { +func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { klog.Info("[fake cpumanager] Start()") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go index c79091659e3..83b5d07eedc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go @@ -19,6 +19,7 @@ package cpumanager import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" ) // Policy implements logic for pod container to CPU assignment. @@ -29,4 +30,8 @@ type Policy interface { AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error // RemoveContainer call is idempotent RemoveContainer(s state.State, containerID string) error + // GetTopologyHints implements the topologymanager.HintProvider Interface + // and is consulted to achieve NUMA aware resource alignment among this + // and other resource controllers. 
+ GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go index 294edc6bf31..fd56d08f89a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go @@ -20,6 +20,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" ) type nonePolicy struct{} @@ -49,3 +50,7 @@ func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Cont func (p *nonePolicy) RemoveContainer(s state.State, containerID string) error { return nil } + +func (p *nonePolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 8789f6e23ae..dd71c026dfb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -19,14 +19,14 @@ package cpumanager import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/klog" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" ) // PolicyStatic is the name of the static policy @@ -89,14 +89,19 @@ var _ Policy = &staticPolicy{} // NewStaticPolicy returns a CPU manager policy that does not change CPU // assignments for exclusively pinned guaranteed containers after the main // container process starts. -func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, affinity topologymanager.Store) Policy { +func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store) Policy { allCPUs := topology.CPUDetails.CPUs() - // takeByTopology allocates CPUs associated with low-numbered cores from - // allCPUs. - // - // For example: Given a system with 8 CPUs available and HT enabled, - // if numReservedCPUs=2, then reserved={0,4} - reserved, _ := takeByTopology(topology, allCPUs, numReservedCPUs) + var reserved cpuset.CPUSet + if reservedCPUs.Size() > 0 { + reserved = reservedCPUs + } else { + // takeByTopology allocates CPUs associated with low-numbered cores from + // allCPUs. 
+ // + // For example: Given a system with 8 CPUs available and HT enabled, + // if numReservedCPUs=2, then reserved={0,4} + reserved, _ = takeByTopology(topology, allCPUs, numReservedCPUs) + } if reserved.Size() != numReservedCPUs { panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs)) @@ -250,14 +255,14 @@ func (p *staticPolicy) RemoveContainer(s state.State, containerID string) (rerr return nil } -func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity socketmask.SocketMask) (cpuset.CPUSet, error) { +func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask) (cpuset.CPUSet, error) { klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d, socket: %v)", numCPUs, numaAffinity) // If there are aligned CPUs in numaAffinity, attempt to take those first. result := cpuset.NewCPUSet() if numaAffinity != nil { alignedCPUs := cpuset.NewCPUSet() - for _, numaNodeID := range numaAffinity.GetSockets() { + for _, numaNodeID := range numaAffinity.GetBits() { alignedCPUs = alignedCPUs.Union(p.assignableCPUs(s).Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID))) } @@ -301,3 +306,116 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int // https://golang.org/ref/spec#Numeric_types return int(cpuQuantity.Value()) } + +func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { + // If there are no CPU resources requested for this container, we do not + // generate any topology hints. + if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { + return nil + } + + // Get a count of how many guaranteed CPUs have been requested. + requested := p.guaranteedCPUs(&pod, &container) + + // If there are no guaranteed CPUs being requested, we do not generate + // any topology hints. This can happen, for example, because init + // containers don't have to have guaranteed CPUs in order for the pod + // to still be in the Guaranteed QOS tier. + if requested == 0 { + return nil + } + + // Short circuit to regenerate the same hints if there are already + // guaranteed CPUs allocated to the Container. This might happen after a + // kubelet restart, for example. + containerID, _ := findContainerIDByName(&pod.Status, container.Name) + if allocated, exists := s.GetCPUSet(containerID); exists { + if allocated.Size() != requested { + klog.Errorf("[cpumanager] CPUs already allocated to (pod %v, container %v) with different number than request: requested: %d, allocated: %d", string(pod.UID), container.Name, requested, allocated.Size()) + return map[string][]topologymanager.TopologyHint{ + string(v1.ResourceCPU): {}, + } + } + klog.Infof("[cpumanager] Regenerating TopologyHints for CPUs already allocated to (pod %v, container %v)", string(pod.UID), container.Name) + return map[string][]topologymanager.TopologyHint{ + string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, requested), + } + } + + // Get a list of available CPUs. + available := p.assignableCPUs(s) + + // Generate hints. 
+ cpuHints := p.generateCPUTopologyHints(available, requested) + klog.Infof("[cpumanager] TopologyHints generated for pod '%v', container '%v': %v", pod.Name, container.Name, cpuHints) + + return map[string][]topologymanager.TopologyHint{ + string(v1.ResourceCPU): cpuHints, + } +} + +// generateCPUTopologyHints generates a set of TopologyHints given the set of +// available CPUs and the number of CPUs being requested. +// +// It follows the convention of marking all hints that have the same number of +// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and +// marking all others with 'Preferred: false'. +func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint { + // Initialize minAffinitySize to include all NUMA Nodes. + minAffinitySize := p.topology.CPUDetails.NUMANodes().Size() + // Initialize minSocketsOnMinAffinity to include all Sockets. + minSocketsOnMinAffinity := p.topology.CPUDetails.Sockets().Size() + + // Iterate through all combinations of NUMA node bitmasks and build hints from them. + hints := []topologymanager.TopologyHint{} + bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().ToSlice(), func(mask bitmask.BitMask) { + // First, update minAffinitySize and minSocketsOnMinAffinity for the + // current request size. + cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size() + socketsInMask := p.topology.CPUDetails.SocketsInNUMANodes(mask.GetBits()...).Size() + if cpusInMask >= request && mask.Count() < minAffinitySize { + minAffinitySize = mask.Count() + if socketsInMask < minSocketsOnMinAffinity { + minSocketsOnMinAffinity = socketsInMask + } + } + + // Then check to see if we have enough CPUs available in the current + // NUMA node bitmask to satisfy the CPU request. + numMatching := 0 + for _, c := range availableCPUs.ToSlice() { + if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) { + numMatching++ + } + } + + // If we don't, then move onto the next combination. + if numMatching < request { + return + } + + // Otherwise, create a new hint from the NUMA node bitmask and add it to the + // list of hints. We set all hint preferences to 'false' on the first + // pass through. + hints = append(hints, topologymanager.TopologyHint{ + NUMANodeAffinity: mask, + Preferred: false, + }) + }) + + // Loop back through all hints and update the 'Preferred' field based on + // counting the number of bits set in the affinity mask and comparing it + // to the minAffinitySize. Only those with an equal number of bits set (and + // with a minimal set of sockets) will be considered preferred.
+ for i := range hints { + if hints[i].NUMANodeAffinity.Count() == minAffinitySize { + nodes := hints[i].NUMANodeAffinity.GetBits() + numSockets := p.topology.CPUDetails.SocketsInNUMANodes(nodes...).Size() + if numSockets == minSocketsOnMinAffinity { + hints[i].Preferred = true + } + } + } + + return hints +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go index 0550b644d57..be32509279b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go @@ -20,7 +20,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) -// ContainerCPUAssignments type used in cpu manger state +// ContainerCPUAssignments type used in cpu manager state type ContainerCPUAssignments map[string]cpuset.CPUSet // Clone returns a copy of ContainerCPUAssignments diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index c5fd99019da..54a5394503f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -293,7 +293,7 @@ func GetNUMANodeInfo() (NUMANodeInfo, error) { // nil NUMANodeInfo, indicating that no NUMA information is available // on this machine. This should implicitly be interpreted as having a // single NUMA node with id 0 for all CPUs. - nodelist, err := ioutil.ReadFile("/sys/devices/system/node/possible") + nodelist, err := ioutil.ReadFile("/sys/devices/system/node/online") if err != nil { return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology_hints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology_hints.go deleted file mode 100644 index ce70c5465f5..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology_hints.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cpumanager - -import ( - "k8s.io/api/core/v1" - "k8s.io/klog" - - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask" -) - -func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { - // The 'none' policy does not generate topology hints. - if m.policy.Name() == string(PolicyNone) { - return nil - } - - // For all other policies, if there are no CPU resources requested for this - // container, we do not generate any topology hints. 
- if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { - return nil - } - - // Otherwise, attempt to generate TopologyHints for the CPUManager. - // For now, this implementation assumes the 'static' CPUManager policy. - // TODO: Generalize this so that its applicable to future CPUManager polices. - - // Get a count of how many guaranteed CPUs have been requested. - requested := m.policy.(*staticPolicy).guaranteedCPUs(&pod, &container) - - // If there are no guaranteed CPUs being requested, we do not generate - // any topology hints. This can happen, for example, because init - // containers don't have to have guaranteed CPUs in order for the pod - // to still be in the Guaranteed QOS tier. - if requested == 0 { - return nil - } - - // Get a list of available CPUs. - available := m.policy.(*staticPolicy).assignableCPUs(m.state) - - // Generate hints. - cpuHints := m.generateCPUTopologyHints(available, requested) - klog.Infof("[cpumanager] TopologyHints generated for pod '%v', container '%v': %v", pod.Name, container.Name, cpuHints) - - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): cpuHints, - } -} - -// generateCPUtopologyHints generates a set of TopologyHints given the set of -// available CPUs and the number of CPUs being requested. -// -// It follows the convention of marking all hints that have the same number of -// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and -// marking all others with 'Preferred: false'. -func (m *manager) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint { - // Initialize minAffinitySize to include all NUMA Nodes. - minAffinitySize := m.topology.CPUDetails.NUMANodes().Size() - // Initialize minSocketsOnMinAffinity to include all Sockets. - minSocketsOnMinAffinity := m.topology.CPUDetails.Sockets().Size() - - // Iterate through all combinations of socketMasks and build hints from them. - hints := []topologymanager.TopologyHint{} - socketmask.IterateSocketMasks(m.topology.CPUDetails.NUMANodes().ToSlice(), func(mask socketmask.SocketMask) { - // First, update minAffinitySize and minSocketsOnMinAffinity for the - // current request size. - cpusInMask := m.topology.CPUDetails.CPUsInNUMANodes(mask.GetSockets()...).Size() - socketsInMask := m.topology.CPUDetails.SocketsInNUMANodes(mask.GetSockets()...).Size() - if cpusInMask >= request && mask.Count() < minAffinitySize { - minAffinitySize = mask.Count() - if socketsInMask < minSocketsOnMinAffinity { - minSocketsOnMinAffinity = socketsInMask - } - } - - // Then check to see if we have enough CPUs available on the current - // SocketMask to satisfy the CPU request. - numMatching := 0 - for _, c := range availableCPUs.ToSlice() { - if mask.IsSet(m.topology.CPUDetails[c].NUMANodeID) { - numMatching++ - } - } - - // If we don't, then move onto the next combination. - if numMatching < request { - return - } - - // Otherwise, create a new hint from the SocketMask and add it to the - // list of hints. We set all hint preferences to 'false' on the first - // pass through. - hints = append(hints, topologymanager.TopologyHint{ - NUMANodeAffinity: mask, - Preferred: false, - }) - }) - - // Loop back through all hints and update the 'Preferred' field based on - // counting the number of bits sets in the affinity mask and comparing it - // to the minAffinitySize. Only those with an equal number of bits set (and - // with a minimal set of sockets) will be considered preferred. 
- for i := range hints { - if hints[i].NUMANodeAffinity.Count() == minAffinitySize { - nodes := hints[i].NUMANodeAffinity.GetSockets() - numSockets := m.topology.CPUDetails.SocketsInNUMANodes(nodes...).Size() - if numSockets == minSocketsOnMinAffinity { - hints[i].Preferred = true - } - } - } - - return hints -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD index ee0ca125a95..3299f7f4e9a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD @@ -16,15 +16,13 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/checkpointmanager/errors:go_default_library", "//pkg/kubelet/cm/cpumanager/topology:go_default_library", "//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library", "//pkg/kubelet/cm/topologymanager:go_default_library", - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", @@ -34,8 +32,11 @@ go_library( "//pkg/util/selinux:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -50,11 +51,9 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/cm/topologymanager:go_default_library", - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/pluginmanager:go_default_library", @@ -62,10 +61,13 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1:go_default_library", + 
"//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS index d081f9a626d..47978ec20b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS @@ -4,6 +4,7 @@ approvers: - jiayingz - vishh reviewers: +- klueska - mindprince - RenaudWasTaken - vikaschoudhary16 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go index f0c7bc2641c..7faaba04cdc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go @@ -27,8 +27,8 @@ import ( "google.golang.org/grpc" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" - watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" + pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" + watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" ) // Stub implementation for DevicePlugin. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go index ff8ba061d5d..7a5994d8836 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc" "k8s.io/klog" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" + pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" ) // endpoint maps to a single registered device plugin. 
It is responsible diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go index 053ec621618..03b217a788d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go @@ -31,18 +31,19 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + errorsutil "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" + pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -189,6 +190,7 @@ func (m *ManagerImpl) removeContents(dir string) error { if err != nil { return err } + var errs []error for _, name := range names { filePath := filepath.Join(dir, name) if filePath == m.checkpointFile() { @@ -204,10 +206,12 @@ func (m *ManagerImpl) removeContents(dir string) error { } err = os.RemoveAll(filePath) if err != nil { - return err + errs = append(errs, err) + klog.Errorf("Failed to remove file %s: %v", filePath, err) + continue } } - return nil + return errorsutil.NewAggregate(errs) } // checkpointFile returns device plugin checkpoint file path. @@ -231,7 +235,9 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc } socketPath := filepath.Join(m.socketdir, m.socketname) - os.MkdirAll(m.socketdir, 0755) + if err = os.MkdirAll(m.socketdir, 0750); err != nil { + return err + } if selinux.SELinuxEnabled() { if err := selinux.SetFileLabel(m.socketdir, config.KubeletPluginsDirSELinuxLabel); err != nil { klog.Warningf("Unprivileged containerized plugins might not work. 
Could not set selinux context on %s: %v", m.socketdir, err) @@ -277,7 +283,7 @@ func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler { } // ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource -func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { +func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string) error { klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) if !m.isVersionCompatibleWithPlugin(versions) { @@ -552,7 +558,9 @@ func (m *ManagerImpl) writeCheckpoint() error { m.mutex.Unlock() err := m.checkpointManager.CreateCheckpoint(kubeletDeviceManagerCheckpoint, data) if err != nil { - return fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err) + err2 := fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err) + klog.Warning(err2) + return err2 } return nil } @@ -674,8 +682,8 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi return devices, nil } -func (m *ManagerImpl) takeByTopology(resource string, available sets.String, affinity socketmask.SocketMask, request int) []string { - // Build a map of of NUMA Nodes to the devices associated with them. A +func (m *ManagerImpl) takeByTopology(resource string, available sets.String, affinity bitmask.BitMask, request int) []string { + // Build a map of NUMA Nodes to the devices associated with them. A // device may be associated to multiple NUMA nodes at the same time. If an // available device does not have any NUMA Nodes associated with it, add it // to a list of NUMA Nodes for the fake NUMANode -1. 
@@ -853,7 +861,9 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co } if needsReAllocate { klog.V(2).Infof("needs re-allocate device plugin resources for pod %s", podUID) - m.allocatePodResources(pod) + if err := m.allocatePodResources(pod); err != nil { + return nil, err + } } m.mutex.Lock() defer m.mutex.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go index 4fd4b196c4e..4e45facf434 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go @@ -20,7 +20,7 @@ import ( "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" + pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go index 38ae0e166f7..7a8d1925896 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go @@ -21,26 +21,47 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" ) // GetTopologyHints implements the TopologyManager HintProvider Interface which // ensures the Device Manager is consulted when Topology Aware Hints for each // container are created. func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { - deviceHints := make(map[string][]topologymanager.TopologyHint) + // Garbage collect any stranded device resources before providing TopologyHints + m.updateAllocatedDevices(m.activePods()) + + // Loop through all device resources and generate TopologyHints for them. + deviceHints := make(map[string][]topologymanager.TopologyHint) for resourceObj, requestedObj := range container.Resources.Limits { resource := string(resourceObj) requested := int(requestedObj.Value()) + + // Only consider resources associated with a device plugin. if m.isDevicePluginResource(resource) { + // Only consider devices that actually contain topology information. if aligned := m.deviceHasTopologyAlignment(resource); !aligned { klog.Infof("[devicemanager] Resource '%v' does not have a topology preference", resource) deviceHints[resource] = nil continue } + + // Short circuit to regenerate the same hints if there are already + // devices allocated to the Container. This might happen after a + // kubelet restart, for example.
+ allocated := m.podDevices.containerDevices(string(pod.UID), container.Name, resource) + if allocated.Len() > 0 { + if allocated.Len() != requested { + klog.Errorf("[devicemanager] Resource '%v' already allocated to (pod %v, container %v) with different number than request: requested: %d, allocated: %d", resource, string(pod.UID), container.Name, requested, allocated.Len()) + deviceHints[resource] = []topologymanager.TopologyHint{} + continue + } + klog.Infof("[devicemanager] Regenerating TopologyHints for resource '%v' already allocated to (pod %v, container %v)", resource, string(pod.UID), container.Name) + deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, requested) + continue + } + + // Get the list of available devices, for which TopologyHints should be generated. available := m.getAvailableDevices(resource) if available.Len() < requested { klog.Errorf("[devicemanager] Unable to generate topology hints: requested number of devices unavailable for '%s': requested: %d, available: %d", resource, requested, available.Len()) @@ -48,6 +69,8 @@ func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[s continue } + // Generate TopologyHints for this resource given the current + // request size and the list of available devices. deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, requested) } } @@ -66,8 +89,6 @@ func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool { } func (m *ManagerImpl) getAvailableDevices(resource string) sets.String { - // Gets Devices in use. - m.updateAllocatedDevices(m.activePods()) // Strip all devices in use from the list of healthy ones. return m.healthyDevices[resource].Difference(m.allocatedDevices[resource]) } @@ -78,7 +99,7 @@ func (m *ManagerImpl) generateDeviceTopologyHints(resource string, devices sets. // Iterate through all combinations of NUMA Nodes and build hints from them. hints := []topologymanager.TopologyHint{} - socketmask.IterateSocketMasks(m.numaNodes, func(mask socketmask.SocketMask) { + bitmask.IterateBitMasks(m.numaNodes, func(mask bitmask.BitMask) { // First, update minAffinitySize for the current request size. devicesInMask := 0 for _, device := range m.allDevices[resource] { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go index f0bf4df880b..02059eeed18 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go @@ -23,7 +23,7 @@ import ( "path" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -157,20 +157,26 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName var errlist []error // os.Kill often errors out, // We try killing all the pids multiple times + removed := map[int]bool{} for i := 0; i < 5; i++ { if i != 0 { - klog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i) + klog.V(3).Infof("Attempt %v failed to kill all unwanted process from cgroup: %v. 
Retyring", i, podCgroup) } errlist = []error{} for _, pid := range pidsToKill { - klog.V(3).Infof("Attempt to kill process with pid: %v", pid) + if _, ok := removed[pid]; ok { + continue + } + klog.V(3).Infof("Attempt to kill process with pid: %v from cgroup: %v", pid, podCgroup) if err := m.killOnePid(pid); err != nil { - klog.V(3).Infof("failed to kill process with pid: %v", pid) + klog.V(3).Infof("failed to kill process with pid: %v from cgroup: %v", pid, podCgroup) errlist = append(errlist, err) + } else { + removed[pid] = true } } if len(errlist) == 0 { - klog.V(3).Infof("successfully killed all unwanted processes.") + klog.V(3).Infof("successfully killed all unwanted processes from cgroup: %v", podCgroup) return nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/BUILD index d69ba9ba597..5d15446fd2e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/BUILD @@ -15,7 +15,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/cm/cpumanager/topology:go_default_library", - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -33,7 +33,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/kubelet/cm/topologymanager/socketmask:all-srcs", + "//pkg/kubelet/cm/topologymanager/bitmask:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], @@ -47,11 +47,12 @@ go_test( "policy_none_test.go", "policy_restricted_test.go", "policy_single_numa_node_test.go", + "policy_test.go", "topology_manager_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/cm/topologymanager/socketmask:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS new file mode 100644 index 00000000000..453f9465dcb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- ConnorDoyle +- derekwaynecarr +- klueska +- lmdaly + +reviewers: +- nolancon diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/BUILD similarity index 87% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/BUILD index 25bd381e862..8d89cfe4751 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/BUILD @@ -2,8 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = 
"go_default_library", - srcs = ["socketmask.go"], - importpath = "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask", + srcs = ["bitmask.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask", visibility = ["//visibility:public"], ) @@ -23,6 +23,6 @@ filegroup( go_test( name = "go_default_test", - srcs = ["socketmask_test.go"], + srcs = ["bitmask_test.go"], embed = [":go_default_library"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go new file mode 100644 index 00000000000..0f01da319a3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go @@ -0,0 +1,194 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitmask + +import ( + "fmt" + "math/bits" +) + +// BitMask interface allows hint providers to create BitMasks for TopologyHints +type BitMask interface { + Add(bits ...int) error + Remove(bits ...int) error + And(masks ...BitMask) + Or(masks ...BitMask) + Clear() + Fill() + IsEqual(mask BitMask) bool + IsEmpty() bool + IsSet(bit int) bool + IsNarrowerThan(mask BitMask) bool + String() string + Count() int + GetBits() []int +} + +type bitMask uint64 + +// NewEmptyBitMask creates a new, empty BitMask +func NewEmptyBitMask() BitMask { + s := bitMask(0) + return &s +} + +// NewBitMask creates a new BitMask +func NewBitMask(bits ...int) (BitMask, error) { + s := bitMask(0) + err := (&s).Add(bits...) 
+ if err != nil { + return nil, err + } + return &s, nil +} + +// Add adds the bits with topology affinity to the BitMask +func (s *bitMask) Add(bits ...int) error { + mask := *s + for _, i := range bits { + if i < 0 || i >= 64 { + return fmt.Errorf("bit number must be in range 0-63") + } + mask |= 1 << uint64(i) + } + *s = mask + return nil +} + +// Remove removes specified bits from BitMask +func (s *bitMask) Remove(bits ...int) error { + mask := *s + for _, i := range bits { + if i < 0 || i >= 64 { + return fmt.Errorf("bit number must be in range 0-63") + } + mask &^= 1 << uint64(i) + } + *s = mask + return nil +} + +// And performs an AND operation on all bits in masks +func (s *bitMask) And(masks ...BitMask) { + for _, m := range masks { + *s &= *m.(*bitMask) + } +} + +// Or performs an OR operation on all bits in masks +func (s *bitMask) Or(masks ...BitMask) { + for _, m := range masks { + *s |= *m.(*bitMask) + } +} + +// Clear resets all bits in mask to zero +func (s *bitMask) Clear() { + *s = 0 +} + +// Fill sets all bits in mask to one +func (s *bitMask) Fill() { + *s = bitMask(^uint64(0)) +} + +// IsEmpty checks mask to see if all bits are zero +func (s *bitMask) IsEmpty() bool { + return *s == 0 +} + +// IsSet checks bit in mask to see if bit is set to one +func (s *bitMask) IsSet(bit int) bool { + if bit < 0 || bit >= 64 { + return false + } + return (*s & (1 << uint64(bit))) > 0 +} + +// IsEqual checks if masks are equal +func (s *bitMask) IsEqual(mask BitMask) bool { + return *s == *mask.(*bitMask) +} + +// IsNarrowerThan checks if one mask is narrower than another. +// +// A mask is said to be "narrower" than another if it has fewer bits set. If the +// same number of bits are set in both masks, then the mask with more +// lower-numbered bits set wins out. +func (s *bitMask) IsNarrowerThan(mask BitMask) bool { + if s.Count() == mask.Count() { + if *s < *mask.(*bitMask) { + return true + } + } + return s.Count() < mask.Count() +} + +// String converts mask to string +func (s *bitMask) String() string { + return fmt.Sprintf("%064b", *s) +} + +// Count counts number of bits in mask set to one +func (s *bitMask) Count() int { + return bits.OnesCount64(uint64(*s)) +} + +// GetBits returns each bit number with bits set to one +func (s *bitMask) GetBits() []int { + var bits []int + for i := uint64(0); i < 64; i++ { + if (*s & (1 << i)) > 0 { + bits = append(bits, int(i)) + } + } + return bits +} + +// And is a package level implementation of 'and' between first and masks +func And(first BitMask, masks ...BitMask) BitMask { + s := *first.(*bitMask) + s.And(masks...) + return &s +} + +// Or is a package level implementation of 'or' between first and masks +func Or(first BitMask, masks ...BitMask) BitMask { + s := *first.(*bitMask) + s.Or(masks...) + return &s +} + +// IterateBitMasks iterates all possible masks from a list of bits, +// issuing a callback on each mask. +func IterateBitMasks(bits []int, callback func(BitMask)) { + var iterate func(bits, accum []int, size int) + iterate = func(bits, accum []int, size int) { + if len(accum) == size { + mask, _ := NewBitMask(accum...)
+ callback(mask) + return + } + for i := range bits { + iterate(bits[i+1:], append(accum, bits[i]), size) + } + } + + for i := 1; i <= len(bits); i++ { + iterate(bits, []int{}, i) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go index 7ffa0a79f10..c93f3d135a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go @@ -20,10 +20,11 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) -//Policy interface for Topology Manager Pod Admit Result +// Policy interface for Topology Manager Pod Admit Result type Policy interface { - //Returns Policy Name + // Returns Policy Name Name() string - //Returns Pod Admit Handler Response based on hints and policy type - CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult + // Returns a merged TopologyHint based on input from hint providers + // and a Pod Admit Handler Response based on hints and policy type + Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go index b5f81b2fdb4..2a0d9f1d927 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go @@ -17,10 +17,15 @@ limitations under the License. package topologymanager import ( + "k8s.io/klog" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) -type bestEffortPolicy struct{} +type bestEffortPolicy struct { + //List of NUMA Nodes available on the underlying machine + numaNodes []int +} var _ Policy = &bestEffortPolicy{} @@ -28,16 +33,167 @@ var _ Policy = &bestEffortPolicy{} const PolicyBestEffort string = "best-effort" // NewBestEffortPolicy returns best-effort policy. -func NewBestEffortPolicy() Policy { - return &bestEffortPolicy{} +func NewBestEffortPolicy(numaNodes []int) Policy { + return &bestEffortPolicy{numaNodes: numaNodes} } func (p *bestEffortPolicy) Name() string { return PolicyBestEffort } -func (p *bestEffortPolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { +func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { return lifecycle.PodAdmitResult{ Admit: true, } } + +func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { + hint := mergeProvidersHints(p, p.numaNodes, providersHints) + admit := p.canAdmitPodResult(&hint) + return hint, admit +} + +// Iterate over all permutations of hints in 'allProviderHints [][]TopologyHint'. +// +// This procedure is implemented as a recursive function over the set of hints +// in 'allproviderHints[i]'. It applies the function 'callback' to each +// permutation as it is found. It is the equivalent of: +// +// for i := 0; i < len(providerHints[0]); i++ +// for j := 0; j < len(providerHints[1]); j++ +// for k := 0; k < len(providerHints[2]); k++ +// ... 
+// for z := 0; z < len(providerHints[-1]); z++ +// permutation := []TopologyHint{ +// providerHints[0][i], +// providerHints[1][j], +// providerHints[2][k], +// ... +// providerHints[-1][z] +// } +// callback(permutation) +func iterateAllProviderTopologyHints(allProviderHints [][]TopologyHint, callback func([]TopologyHint)) { + // Internal helper function to accumulate the permutation before calling the callback. + var iterate func(i int, accum []TopologyHint) + iterate = func(i int, accum []TopologyHint) { + // Base case: we have looped through all providers and have a full permutation. + if i == len(allProviderHints) { + callback(accum) + return + } + + // Loop through all hints for provider 'i', and recurse to build the + // the permutation of this hint with all hints from providers 'i++'. + for j := range allProviderHints[i] { + iterate(i+1, append(accum, allProviderHints[i][j])) + } + } + iterate(0, []TopologyHint{}) +} + +// Merge the hints from all hint providers to find the best one. +func mergeProvidersHints(policy Policy, numaNodes []int, providersHints []map[string][]TopologyHint) TopologyHint { + // Set the default affinity as an any-numa affinity containing the list + // of NUMA Nodes available on this machine. + defaultAffinity, _ := bitmask.NewBitMask(numaNodes...) + + // Loop through all hint providers and save an accumulated list of the + // hints returned by each hint provider. If no hints are provided, assume + // that provider has no preference for topology-aware allocation. + var allProviderHints [][]TopologyHint + for _, hints := range providersHints { + // If hints is nil, insert a single, preferred any-numa hint into allProviderHints. + if len(hints) == 0 { + klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with any resource") + allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}}) + continue + } + + // Otherwise, accumulate the hints for each resource type into allProviderHints. + for resource := range hints { + if hints[resource] == nil { + klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with resource '%s'", resource) + allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}}) + continue + } + + if len(hints[resource]) == 0 { + klog.Infof("[topologymanager] Hint Provider has no possible NUMA affinities for resource '%s'", resource) + allProviderHints = append(allProviderHints, []TopologyHint{{nil, false}}) + continue + } + + allProviderHints = append(allProviderHints, hints[resource]) + } + } + + // Iterate over all permutations of hints in 'allProviderHints'. Merge the + // hints in each permutation by taking the bitwise-and of their affinity masks. + // Return the hint with the narrowest NUMANodeAffinity of all merged + // permutations that have at least one NUMA ID set. If no merged mask can be + // found that has at least one NUMA ID set, return the 'defaultAffinity'. + bestHint := TopologyHint{defaultAffinity, false} + iterateAllProviderTopologyHints(allProviderHints, func(permutation []TopologyHint) { + // Get the NUMANodeAffinity from each hint in the permutation and see if any + // of them encode unpreferred allocations. 
+ preferred := true + var numaAffinities []bitmask.BitMask + for _, hint := range permutation { + if hint.NUMANodeAffinity == nil { + numaAffinities = append(numaAffinities, defaultAffinity) + } else { + numaAffinities = append(numaAffinities, hint.NUMANodeAffinity) + } + + if !hint.Preferred { + preferred = false + } + + // Special case PolicySingleNumaNode to only prefer hints where + // all providers have a single NUMA affinity set. + if policy != nil && policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity != nil && hint.NUMANodeAffinity.Count() > 1 { + preferred = false + } + } + + // Merge the affinities using a bitwise-and operation. + mergedAffinity, _ := bitmask.NewBitMask(numaNodes...) + mergedAffinity.And(numaAffinities...) + + // Build a mergedHint from the merged affinity mask, indicating if a + // preferred allocation was used to generate the affinity mask or not. + mergedHint := TopologyHint{mergedAffinity, preferred} + + // Only consider mergedHints that result in a NUMANodeAffinity > 0 to + // replace the current bestHint. + if mergedHint.NUMANodeAffinity.Count() == 0 { + return + } + + // If the current bestHint is non-preferred and the new mergedHint is + // preferred, always choose the preferred hint over the non-preferred one. + if mergedHint.Preferred && !bestHint.Preferred { + bestHint = mergedHint + return + } + + // If the current bestHint is preferred and the new mergedHint is + // non-preferred, never update bestHint, regardless of mergedHint's + // narrowness. + if !mergedHint.Preferred && bestHint.Preferred { + return + } + + // If mergedHint and bestHint have the same preference, only consider + // mergedHints that have a narrower NUMANodeAffinity than the + // NUMANodeAffinity in the current bestHint. + if !mergedHint.NUMANodeAffinity.IsNarrowerThan(bestHint.NUMANodeAffinity) { + return + } + + // In all other cases, update bestHint to the current mergedHint + bestHint = mergedHint + }) + + return bestHint +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go index aacdf19aa10..7e5010a765d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go @@ -36,8 +36,12 @@ func (p *nonePolicy) Name() string { return PolicyNone } -func (p *nonePolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { +func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { return lifecycle.PodAdmitResult{ Admit: true, } } + +func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { + return TopologyHint{}, p.canAdmitPodResult(nil) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go index 7993675ce35..cc522980c0e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go @@ -20,7 +20,9 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) -type restrictedPolicy struct{} +type restrictedPolicy struct { + bestEffortPolicy +} var _ Policy = &restrictedPolicy{} @@ -28,15 +30,15 @@ var _ Policy =
&restrictedPolicy{} const PolicyRestricted string = "restricted" // NewRestrictedPolicy returns restricted policy. -func NewRestrictedPolicy() Policy { - return &restrictedPolicy{} +func NewRestrictedPolicy(numaNodes []int) Policy { + return &restrictedPolicy{bestEffortPolicy{numaNodes: numaNodes}} } func (p *restrictedPolicy) Name() string { return PolicyRestricted } -func (p *restrictedPolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { +func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { if !hint.Preferred { return lifecycle.PodAdmitResult{ Admit: false, @@ -48,3 +50,9 @@ func (p *restrictedPolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAd Admit: true, } } + +func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { + hint := mergeProvidersHints(p, p.numaNodes, providersHints) + admit := p.canAdmitPodResult(&hint) + return hint, admit +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go index 1d672a99cb2..6f29e1c27b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go @@ -20,7 +20,10 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) -type singleNumaNodePolicy struct{} +type singleNumaNodePolicy struct { + //List of NUMA Nodes available on the underlying machine + numaNodes []int +} var _ Policy = &singleNumaNodePolicy{} @@ -28,16 +31,16 @@ var _ Policy = &singleNumaNodePolicy{} const PolicySingleNumaNode string = "single-numa-node" // NewSingleNumaNodePolicy returns single-numa-node policy. -func NewSingleNumaNodePolicy() Policy { - return &singleNumaNodePolicy{} +func NewSingleNumaNodePolicy(numaNodes []int) Policy { + return &singleNumaNodePolicy{numaNodes: numaNodes} } func (p *singleNumaNodePolicy) Name() string { return PolicySingleNumaNode } -func (p *singleNumaNodePolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { - if !hint.Preferred || hint.NUMANodeAffinity.Count() > 1 { +func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { + if !hint.Preferred { return lifecycle.PodAdmitResult{ Admit: false, Reason: "Topology Affinity Error", @@ -48,3 +51,9 @@ func (p *singleNumaNodePolicy) CanAdmitPodResult(hint *TopologyHint) lifecycle.P Admit: true, } } + +func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { + hint := mergeProvidersHints(p, p.numaNodes, providersHints) + admit := p.canAdmitPodResult(&hint) + return hint, admit +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/socketmask.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/socketmask.go deleted file mode 100644 index 8befaec0bce..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask/socketmask.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package socketmask - -import ( - "fmt" - "math/bits" -) - -// SocketMask interface allows hint providers to create SocketMasks for TopologyHints -type SocketMask interface { - Add(sockets ...int) error - Remove(sockets ...int) error - And(masks ...SocketMask) - Or(masks ...SocketMask) - Clear() - Fill() - IsEqual(mask SocketMask) bool - IsEmpty() bool - IsSet(socket int) bool - IsNarrowerThan(mask SocketMask) bool - String() string - Count() int - GetSockets() []int -} - -type socketMask uint64 - -// NewEmptySocketMask creates a new, empty SocketMask -func NewEmptySocketMask() SocketMask { - s := socketMask(0) - return &s -} - -// NewSocketMask creates a new SocketMask -func NewSocketMask(sockets ...int) (SocketMask, error) { - s := socketMask(0) - err := (&s).Add(sockets...) - if err != nil { - return nil, err - } - return &s, nil -} - -// Add adds the sockets with topology affinity to the SocketMask -func (s *socketMask) Add(sockets ...int) error { - mask := *s - for _, i := range sockets { - if i < 0 || i >= 64 { - return fmt.Errorf("socket number must be in range 0-63") - } - mask |= 1 << uint64(i) - } - *s = mask - return nil -} - -// Remove removes specified sockets from SocketMask -func (s *socketMask) Remove(sockets ...int) error { - mask := *s - for _, i := range sockets { - if i < 0 || i >= 64 { - return fmt.Errorf("socket number must be in range 0-63") - } - mask &^= 1 << uint64(i) - } - *s = mask - return nil -} - -// And performs and operation on all bits in masks -func (s *socketMask) And(masks ...SocketMask) { - for _, m := range masks { - *s &= *m.(*socketMask) - } -} - -// Or performs or operation on all bits in masks -func (s *socketMask) Or(masks ...SocketMask) { - for _, m := range masks { - *s |= *m.(*socketMask) - } -} - -// Clear resets all bits in mask to zero -func (s *socketMask) Clear() { - *s = 0 -} - -// Fill sets all bits in mask to one -func (s *socketMask) Fill() { - *s = socketMask(^uint64(0)) -} - -// IsEmpty checks mask to see if all bits are zero -func (s *socketMask) IsEmpty() bool { - return *s == 0 -} - -// IsSet checks socket in mask to see if bit is set to one -func (s *socketMask) IsSet(socket int) bool { - if socket < 0 || socket >= 64 { - return false - } - return (*s & (1 << uint64(socket))) > 0 -} - -// IsEqual checks if masks are equal -func (s *socketMask) IsEqual(mask SocketMask) bool { - return *s == *mask.(*socketMask) -} - -// IsNarrowerThan checks if one mask is narrower than another. -// -// A mask is said to be "narrower" than another if it has lets bits set. If the -// same number of bits are set in both masks, then the mask with more -// lower-numbered bits set wins out. 
-func (s *socketMask) IsNarrowerThan(mask SocketMask) bool { - if s.Count() == mask.Count() { - if *s < *mask.(*socketMask) { - return true - } - } - return s.Count() < mask.Count() -} - -// String converts mask to string -func (s *socketMask) String() string { - return fmt.Sprintf("%064b", *s) -} - -// Count counts number of bits in mask set to one -func (s *socketMask) Count() int { - return bits.OnesCount64(uint64(*s)) -} - -// GetSockets returns each socket number with bits set to one -func (s *socketMask) GetSockets() []int { - var sockets []int - for i := uint64(0); i < 64; i++ { - if (*s & (1 << i)) > 0 { - sockets = append(sockets, int(i)) - } - } - return sockets -} - -// And is a package level implementation of 'and' between first and masks -func And(first SocketMask, masks ...SocketMask) SocketMask { - s := *first.(*socketMask) - s.And(masks...) - return &s -} - -// Or is a package level implementation of 'or' between first and masks -func Or(first SocketMask, masks ...SocketMask) SocketMask { - s := *first.(*socketMask) - s.Or(masks...) - return &s -} - -// IterateSocketMasks iterates all possible masks from a list of sockets, -// issuing a callback on each mask. -func IterateSocketMasks(sockets []int, callback func(SocketMask)) { - var iterate func(sockets, accum []int, size int) - iterate = func(sockets, accum []int, size int) { - if len(accum) == size { - mask, _ := NewSocketMask(accum...) - callback(mask) - return - } - for i := range sockets { - iterate(sockets[i+1:], append(accum, sockets[i]), size) - } - } - - for i := 1; i <= len(sockets); i++ { - iterate(sockets, []int{}, i) - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go index ab54b9be4ce..4e452d71a00 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/klog" cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) @@ -63,12 +63,19 @@ type manager struct { podMap map[string]string //Topology Manager Policy policy Policy - //List of NUMA Nodes available on the underlying machine - numaNodes []int } -//HintProvider interface is to be implemented by Hint Providers +// HintProvider is an interface for components that want to collaborate to +// achieve globally optimal concrete resource alignment with respect to +// NUMA locality. type HintProvider interface { + // GetTopologyHints returns a map of resource names to a list of possible + // concrete resource allocations in terms of NUMA locality hints. Each hint + // is optionally marked "preferred" and indicates the set of NUMA nodes + // involved in the hypothetical allocation. The topology manager calls + // this function for each hint provider, and merges the hints to produce + // a consensus "best" hint. The hint providers may subsequently query the + // topology manager to influence actual resource assignment. 
GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint } @@ -79,46 +86,67 @@ type Store interface { //TopologyHint is a struct containing the NUMANodeAffinity for a Container type TopologyHint struct { - NUMANodeAffinity socketmask.SocketMask + NUMANodeAffinity bitmask.BitMask // Preferred is set to true when the NUMANodeAffinity encodes a preferred // allocation for the Container. It is set to false otherwise. Preferred bool } +// IsEqual checks if TopologyHint are equal +func (th *TopologyHint) IsEqual(topologyHint TopologyHint) bool { + if th.Preferred == topologyHint.Preferred { + if th.NUMANodeAffinity == nil || topologyHint.NUMANodeAffinity == nil { + return th.NUMANodeAffinity == topologyHint.NUMANodeAffinity + } + return th.NUMANodeAffinity.IsEqual(topologyHint.NUMANodeAffinity) + } + return false +} + +// LessThan checks if TopologyHint `a` is less than TopologyHint `b` +// this means that either `a` is a preferred hint and `b` is not +// or `a` NUMANodeAffinity attribute is narrower than `b` NUMANodeAffinity attribute. +func (th *TopologyHint) LessThan(other TopologyHint) bool { + if th.Preferred != other.Preferred { + return th.Preferred == true + } + return th.NUMANodeAffinity.IsNarrowerThan(other.NUMANodeAffinity) +} + var _ Manager = &manager{} //NewManager creates a new TopologyManager based on provided policy func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string) (Manager, error) { klog.Infof("[topologymanager] Creating topology manager with %s policy", topologyPolicyName) - var policy Policy + var numaNodes []int + for node := range numaNodeInfo { + numaNodes = append(numaNodes, node) + } + + if len(numaNodes) > maxAllowableNUMANodes { + return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", maxAllowableNUMANodes) + } + + var policy Policy switch topologyPolicyName { case PolicyNone: policy = NewNonePolicy() case PolicyBestEffort: - policy = NewBestEffortPolicy() + policy = NewBestEffortPolicy(numaNodes) case PolicyRestricted: - policy = NewRestrictedPolicy() + policy = NewRestrictedPolicy(numaNodes) case PolicySingleNumaNode: - policy = NewSingleNumaNodePolicy() + policy = NewSingleNumaNodePolicy(numaNodes) default: return nil, fmt.Errorf("unknown policy: \"%s\"", topologyPolicyName) } - var numaNodes []int - for node := range numaNodeInfo { - numaNodes = append(numaNodes, node) - } - - if len(numaNodes) > maxAllowableNUMANodes { - return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", maxAllowableNUMANodes) - } - var hp []HintProvider pth := make(map[string]map[string]TopologyHint) pm := make(map[string]string) @@ -127,7 +155,6 @@ func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string podTopologyHints: pth, podMap: pm, policy: policy, - numaNodes: numaNodes, } return manager, nil @@ -137,151 +164,24 @@ func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint return m.podTopologyHints[podUID][containerName] } -// Iterate over all permutations of hints in 'allProviderHints [][]TopologyHint'. -// -// This procedure is implemented as a recursive function over the set of hints -// in 'allproviderHints[i]'. It applies the function 'callback' to each -// permutation as it is found. It is the equivalent of: -// -// for i := 0; i < len(providerHints[0]); i++ -// for j := 0; j < len(providerHints[1]); j++ -// for k := 0; k < len(providerHints[2]); k++ -// ... 
-// for z := 0; z < len(providerHints[-1]); z++ -// permutation := []TopologyHint{ -// providerHints[0][i], -// providerHints[1][j], -// providerHints[2][k], -// ... -// provideryHints[-1][z] -// } -// callback(permutation) -func (m *manager) iterateAllProviderTopologyHints(allProviderHints [][]TopologyHint, callback func([]TopologyHint)) { - // Internal helper function to accumulate the permutation before calling the callback. - var iterate func(i int, accum []TopologyHint) - iterate = func(i int, accum []TopologyHint) { - // Base case: we have looped through all providers and have a full permutation. - if i == len(allProviderHints) { - callback(accum) - return - } - - // Loop through all hints for provider 'i', and recurse to build the - // the permutation of this hint with all hints from providers 'i++'. - for j := range allProviderHints[i] { - iterate(i+1, append(accum, allProviderHints[i][j])) - } - } - iterate(0, []TopologyHint{}) -} - -// Merge the hints from all hint providers to find the best one. -func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) TopologyHint { - // Set the default affinity as an any-numa affinity containing the list - // of NUMA Nodes available on this machine. - defaultAffinity, _ := socketmask.NewSocketMask(m.numaNodes...) - +func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (providersHints []map[string][]TopologyHint) { // Loop through all hint providers and save an accumulated list of the - // hints returned by each hint provider. If no hints are provided, assume - // that provider has no preference for topology-aware allocation. - var allProviderHints [][]TopologyHint + // hints returned by each hint provider. for _, provider := range m.hintProviders { // Get the TopologyHints from a provider. hints := provider.GetTopologyHints(pod, container) - - // If hints is nil, insert a single, preferred any-numa hint into allProviderHints. - if len(hints) == 0 { - klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with any resource") - allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}}) - continue - } - - // Otherwise, accumulate the hints for each resource type into allProviderHints. - for resource := range hints { - if hints[resource] == nil { - klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with resource '%s'", resource) - allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}}) - continue - } - - if len(hints[resource]) == 0 { - klog.Infof("[topologymanager] Hint Provider has no possible NUMA affinities for resource '%s'", resource) - allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, false}}) - continue - } - - allProviderHints = append(allProviderHints, hints[resource]) - } + providersHints = append(providersHints, hints) + klog.Infof("[topologymanager] TopologyHints for pod '%v', container '%v': %v", pod.Name, container.Name, hints) } + return providersHints +} - // Iterate over all permutations of hints in 'allProviderHints'. Merge the - // hints in each permutation by taking the bitwise-and of their affinity masks. - // Return the hint with the narrowest NUMANodeAffinity of all merged - // permutations that have at least one NUMA ID set. If no merged mask can be - // found that has at least one NUMA ID set, return the 'defaultAffinity'. 
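The comment block being removed above describes the merge strategy end to end: enumerate every permutation that takes one hint per provider, AND the affinity masks, and keep the narrowest merged hint, preferring permutations in which every provider's hint was preferred. That logic now lives behind the policies' Merge/mergeProvidersHints helpers, which are not shown in this hunk; the following is a condensed standalone sketch of the same strategy using stand-in types, not the vendored ones.

package main

import (
	"fmt"
	"math/bits"
)

type hint struct {
	affinity  uint64 // bit i set => NUMA node i
	preferred bool
}

// iterate walks every permutation that picks one hint per provider.
func iterate(providers [][]hint, accum []hint, cb func([]hint)) {
	if len(accum) == len(providers) {
		cb(accum)
		return
	}
	for _, h := range providers[len(accum)] {
		iterate(providers, append(accum, h), cb)
	}
}

func narrower(a, b uint64) bool {
	if bits.OnesCount64(a) == bits.OnesCount64(b) {
		return a < b
	}
	return bits.OnesCount64(a) < bits.OnesCount64(b)
}

// merge ANDs each permutation's masks and keeps the narrowest non-empty
// result, always preferring fully "preferred" permutations.
func merge(providers [][]hint, defaultAffinity uint64) hint {
	best := hint{defaultAffinity, false}
	iterate(providers, nil, func(perm []hint) {
		merged := hint{defaultAffinity, true}
		for _, h := range perm {
			merged.affinity &= h.affinity
			merged.preferred = merged.preferred && h.preferred
		}
		if merged.affinity == 0 {
			return // never accept an empty merged mask
		}
		if merged.preferred != best.preferred {
			if merged.preferred {
				best = merged
			}
			return
		}
		if narrower(merged.affinity, best.affinity) {
			best = merged
		}
	})
	return best
}

func main() {
	cpu := []hint{{0b01, true}, {0b11, true}} // prefers node 0, can span both
	dev := []hint{{0b11, true}}               // device only spans both nodes
	fmt.Printf("%+v\n", merge([][]hint{cpu, dev}, 0b11)) // {affinity:1 preferred:true}
}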
- bestHint := TopologyHint{defaultAffinity, false} - m.iterateAllProviderTopologyHints(allProviderHints, func(permutation []TopologyHint) { - // Get the NUMANodeAffinity from each hint in the permutation and see if any - // of them encode unpreferred allocations. - preferred := true - var numaAffinities []socketmask.SocketMask - for _, hint := range permutation { - // Only consider hints that have an actual NUMANodeAffinity set. - if hint.NUMANodeAffinity != nil { - if !hint.Preferred { - preferred = false - } - // Special case PolicySingleNumaNode to only prefer hints where - // all providers have a single NUMA affinity set. - if m.policy != nil && m.policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity.Count() > 1 { - preferred = false - } - numaAffinities = append(numaAffinities, hint.NUMANodeAffinity) - } - } - - // Merge the affinities using a bitwise-and operation. - mergedAffinity, _ := socketmask.NewSocketMask(m.numaNodes...) - mergedAffinity.And(numaAffinities...) - - // Build a mergedHintfrom the merged affinity mask, indicating if an - // preferred allocation was used to generate the affinity mask or not. - mergedHint := TopologyHint{mergedAffinity, preferred} - - // Only consider mergedHints that result in a NUMANodeAffinity > 0 to - // replace the current bestHint. - if mergedHint.NUMANodeAffinity.Count() == 0 { - return - } - - // If the current bestHint is non-preferred and the new mergedHint is - // preferred, always choose the preferred hint over the non-preferred one. - if mergedHint.Preferred && !bestHint.Preferred { - bestHint = mergedHint - return - } - - // If the current bestHint is preferred and the new mergedHint is - // non-preferred, never update bestHint, regardless of mergedHint's - // narowness. - if !mergedHint.Preferred && bestHint.Preferred { - return - } - - // If mergedHint and bestHint has the same preference, only consider - // mergedHints that have a narrower NUMANodeAffinity than the - // NUMANodeAffinity in the current bestHint. - if !mergedHint.NUMANodeAffinity.IsNarrowerThan(bestHint.NUMANodeAffinity) { - return - } - - // In all other cases, update bestHint to the current mergedHint - bestHint = mergedHint - }) - +// Collect Hints from hint providers and pass to policy to retrieve the best one. +func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) (TopologyHint, lifecycle.PodAdmitResult) { + providersHints := m.accumulateProvidersHints(pod, container) + bestHint, admit := m.policy.Merge(providersHints) klog.Infof("[topologymanager] ContainerTopologyHint: %v", bestHint) - - return bestHint + return bestHint, admit } func (m *manager) AddHintProvider(h HintProvider) { @@ -311,23 +211,16 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR } pod := attrs.Pod c := make(map[string]TopologyHint) - klog.Infof("[topologymanager] Pod QoS Level: %v", pod.Status.QOSClass) - - if pod.Status.QOSClass == v1.PodQOSGuaranteed { - for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) 
{ - result := m.calculateAffinity(*pod, container) - admitPod := m.policy.CanAdmitPodResult(&result) - if !admitPod.Admit { - return admitPod - } - c[container.Name] = result - } - m.podTopologyHints[string(pod.UID)] = c - klog.Infof("[topologymanager] Topology Affinity for Pod: %v are %v", pod.UID, m.podTopologyHints[string(pod.UID)]) - } else { - klog.Infof("[topologymanager] Topology Manager only affinitises Guaranteed pods.") + for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { + result, admitPod := m.calculateAffinity(*pod, container) + if !admitPod.Admit { + return admitPod + } + c[container.Name] = result } + m.podTopologyHints[string(pod.UID)] = c + klog.Infof("[topologymanager] Topology Affinity for Pod: %v are %v", pod.UID, m.podTopologyHints[string(pod.UID)]) return lifecycle.PodAdmitResult{ Admit: true, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD index 3332cdc4af7..ffaa1586459 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD @@ -13,6 +13,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/util", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD index 9cc9827deab..c3d577a4ded 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD @@ -47,6 +47,10 @@ go_library( "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/io:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/github.com/fsnotify/fsnotify:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", @@ -88,6 +92,9 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD index 65d24fb61eb..4ca7352fb16 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD @@ -39,6 +39,7 @@ go_library( "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", "//third_party/forked/golang/expansion:go_default_library", "//vendor/k8s.io/klog:go_default_library", + 
"//vendor/k8s.io/utils/net:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go index 67f4b4ec755..71475aa9d56 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" hashutil "k8s.io/kubernetes/pkg/util/hash" "k8s.io/kubernetes/third_party/forked/golang/expansion" + utilsnet "k8s.io/utils/net" ) // HandlerRunner runs a lifecycle handler for a container. @@ -45,7 +46,7 @@ type HandlerRunner interface { // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, cleanupAction func(), err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error) GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host // of a pod. @@ -319,16 +320,28 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) { HostIP: p.HostIP, } + // We need to determine the address family this entry applies to. We do this to ensure + // duplicate containerPort / protocol rules work across different address families. + // https://github.com/kubernetes/kubernetes/issues/82373 + family := "any" + if p.HostIP != "" { + if utilsnet.IsIPv6String(p.HostIP) { + family = "v6" + } else { + family = "v4" + } + } + // We need to create some default port name if it's not specified, since - // this is necessary for rkt. - // http://issue.k8s.io/7710 + // this is necessary for the dockershim CNI driver. + // https://github.com/kubernetes/kubernetes/pull/82374#issuecomment-529496888 if p.Name == "" { - pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort) + pm.Name = fmt.Sprintf("%s-%s-%s:%d", container.Name, family, p.Protocol, p.ContainerPort) } else { pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name) } - // Protect against exposing the same protocol-port more than once in a container. + // Protect against a port name being used more than once in a container. if _, ok := names[pm.Name]; ok { klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) continue diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index 9699a5b5cfe..71f6dc20f7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -63,6 +63,9 @@ type Runtime interface { // Type returns the type of the container runtime. Type() string + //SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not. 
+ SupportsSingleFileMapping() bool + // Version returns the version information of the container runtime. Version() (Version, error) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go index e7e22e203c2..1824b8b84b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go @@ -177,6 +177,10 @@ func (f *FakeRuntime) Type() string { return f.RuntimeType } +func (f *FakeRuntime) SupportsSingleFileMapping() bool { + return true +} + func (f *FakeRuntime) Version() (kubecontainer.Version, error) { f.Lock() defer f.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime_helper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime_helper.go index 5ebdf12db1e..0009ee61e0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime_helper.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime_helper.go @@ -34,7 +34,7 @@ type FakeRuntimeHelper struct { Err error } -func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) { +func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { var opts kubecontainer.RunContainerOptions if len(container.TerminationMessagePath) != 0 { opts.PodContainerDir = f.PodContainerDir diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go index 45c4b485bf2..aa363649e30 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go @@ -47,6 +47,11 @@ func (r *Mock) Type() string { return args.Get(0).(string) } +func (r *Mock) SupportsSingleFileMapping() bool { + args := r.Called() + return args.Get(0).(bool) +} + func (r *Mock) Version() (kubecontainer.Version, error) { args := r.Called() return args.Get(0).(kubecontainer.Version), args.Error(1) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD index 3d82bfedd4d..751d42eeead 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD @@ -123,6 +123,9 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/api/core/v1:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD index 80736ce2410..bdf49dcae40 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD @@ -16,7 +16,14 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", deps = select({ "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", + "//pkg/kubelet/qos:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/kubelet/dockershim/libdocker:go_default_library", @@ -27,6 +34,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/kubelet/dockershim/libdocker:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go index 5d238639762..0f07a4ca236 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go @@ -112,7 +112,7 @@ type NetworkPluginSettings struct { NonMasqueradeCIDR string // PluginName is the name of the plugin, runtime shim probes for PluginName string - // PluginBinDirsString is a list of directiores delimited by commas, in + // PluginBinDirString is a list of directiores delimited by commas, in // which the binaries for the plugin with PluginName may be found. PluginBinDirString string // PluginBinDirs is an array of directories in which the binaries for @@ -419,7 +419,7 @@ func (ds *dockerService) Start() error { } // initCleanup is responsible for cleaning up any crufts left by previous -// runs. If there are any errros, it simply logs them. +// runs. If there are any errors, it simply logs them. 
func (ds *dockerService) initCleanup() { errors := ds.platformSpecificContainerInitCleanup() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go index 2df59c6b05d..76b2fecd11b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go @@ -42,6 +42,8 @@ type streamingRuntime struct { var _ streaming.Runtime = &streamingRuntime{} +const maxMsgSize = 1024 * 1024 * 16 + func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { return r.exec(containerID, cmd, in, out, err, tty, resize, 0) } @@ -78,8 +80,8 @@ func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncReq var stdoutBuffer, stderrBuffer bytes.Buffer err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd, nil, // in - ioutils.WriteCloserWrapper(&stdoutBuffer), - ioutils.WriteCloserWrapper(&stderrBuffer), + ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stdoutBuffer, maxMsgSize)), + ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stderrBuffer, maxMsgSize)), false, // tty nil, // resize timeout) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go index 3d78d183bc8..151bd86066c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go @@ -28,7 +28,7 @@ import ( func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { stderr := new(bytes.Buffer) - err := r.exec(podSandboxID, []string{"wincat.exe", "localhost", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0) + err := r.exec(podSandboxID, []string{"wincat.exe", "127.0.0.1", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0) if err != nil { return fmt.Errorf("%v: %s", err, stderr.String()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go index 04003d9fd51..623e0b56b44 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go @@ -54,6 +54,6 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) { return "", fmt.Errorf("unsupported platform") } -// applyExperimentalCreateConfig applys experimental configures from sandbox annotations. +// applyExperimentalCreateConfig applies experimental configures from sandbox annotations. 
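The ExecSync hunk above wraps the stdout/stderr buffers in a limiting writer so a chatty exec cannot grow the captured output (and hence the CRI response) past maxMsgSize (16 MiB). The vendored ioutils.LimitWriter itself is not part of this patch; a minimal standard-library-only stand-in with the same intent might look like this:

package main

import (
	"fmt"
	"io"
	"strings"
)

// truncatingWriter forwards at most limit bytes to w and silently drops the
// rest, while still reporting full writes so callers such as io.Copy proceed.
type truncatingWriter struct {
	w     io.Writer
	limit int64
}

func (t *truncatingWriter) Write(p []byte) (int, error) {
	if t.limit > 0 {
		chunk := p
		if int64(len(chunk)) > t.limit {
			chunk = chunk[:t.limit]
		}
		n, err := t.w.Write(chunk)
		t.limit -= int64(n)
		if err != nil {
			return n, err
		}
	}
	return len(p), nil
}

func main() {
	var out strings.Builder
	w := &truncatingWriter{w: &out, limit: 5}
	io.Copy(w, strings.NewReader("hello world"))
	fmt.Printf("%q\n", out.String()) // "hello"
}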
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) { } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go index 92bc5c16eaa..09ff2335f8f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go @@ -119,7 +119,7 @@ func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) []string { // Versions and feature support // ============================ - // Windows version == Windows Server, Version 1709,, Supports both sandbox and non-sandbox case + // Windows version == Windows Server, Version 1709, Supports both sandbox and non-sandbox case // Windows version == Windows Server 2016 Support only non-sandbox case // Windows version < Windows Server 2016 is Not Supported diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go index 57cdad82a48..eba29931afc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go @@ -99,6 +99,7 @@ func NewFakeDockerClient() *FakeDockerClient { Clock: clock.RealClock{}, // default this to true, so that we trace calls, image pulls and container lifecycle EnableTrace: true, + ExecInspect: &dockertypes.ContainerExecInspect{}, ImageInspects: make(map[string]*dockertypes.ImageInspect), ImageIDsNeedingAuth: make(map[string]dockertypes.AuthConfig), RandGenerator: rand.New(rand.NewSource(time.Now().UnixNano())), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD index 23d95719007..30222f96444 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go index 228856e988e..951f4520648 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -57,7 +55,7 @@ var ( Subsystem: kubeletSubsystem, Name: DockerOperationsLatencyKey, Help: "Latency in seconds of Docker operations. 
Broken down by operation type.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"operation_type"}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/OWNERS index 358b5e2b627..08cc0a0825f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/OWNERS @@ -3,9 +3,10 @@ approvers: - thockin - dchen1107 -- matchstick - freehan - dcbw +emeritus_approvers: +- matchstick reviewers: - sig-network-reviewers labels: diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD index ddc0d00514e..1b3df59a867 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD @@ -39,6 +39,23 @@ go_test( srcs = ["cni_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/apis/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/container/testing:go_default_library", + "//pkg/kubelet/dockershim/network:go_default_library", + "//pkg/kubelet/dockershim/network/cni/testing:go_default_library", + "//pkg/kubelet/dockershim/network/hostport:go_default_library", + "//pkg/kubelet/dockershim/network/testing:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", + "//vendor/github.com/stretchr/testify/mock:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go index 48d95b5a8a4..5c471f9272d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go @@ -191,7 +191,7 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error) } } if len(confList.Plugins) == 0 { - klog.Warningf("CNI config list %s has no networks, skipping", confFile) + klog.Warningf("CNI config list %s has no networks, skipping", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))])) continue } @@ -199,7 +199,7 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error) // all plugins of this config exist on disk caps, err := cniConfig.ValidateNetworkList(context.TODO(), confList) if err != nil { - klog.Warningf("Error validating CNI config %v: %v", confList, err) + klog.Warningf("Error validating CNI config list %s: %v", string(confList.Bytes[:maxStringLengthInLog(len(confList.Bytes))]), err) continue } @@ -306,14 
+306,17 @@ func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubec return fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) } + // Todo get the timeout from parent ctx + cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second) + defer cancelFunc() // Windows doesn't have loNetwork. It comes only with Linux if plugin.loNetwork != nil { - if _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath, annotations, options); err != nil { + if _, err = plugin.addToNetwork(cniTimeoutCtx, plugin.loNetwork, name, namespace, id, netnsPath, annotations, options); err != nil { return err } } - _, err = plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, annotations, options) + _, err = plugin.addToNetwork(cniTimeoutCtx, plugin.getDefaultNetwork(), name, namespace, id, netnsPath, annotations, options) return err } @@ -328,22 +331,25 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku klog.Warningf("CNI failed to retrieve network namespace path: %v", err) } + // Todo get the timeout from parent ctx + cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second) + defer cancelFunc() // Windows doesn't have loNetwork. It comes only with Linux if plugin.loNetwork != nil { // Loopback network deletion failure should not be fatal on teardown - if err := plugin.deleteFromNetwork(plugin.loNetwork, name, namespace, id, netnsPath, nil); err != nil { + if err := plugin.deleteFromNetwork(cniTimeoutCtx, plugin.loNetwork, name, namespace, id, netnsPath, nil); err != nil { klog.Warningf("CNI failed to delete loopback network: %v", err) } } - return plugin.deleteFromNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil) + return plugin.deleteFromNetwork(cniTimeoutCtx, plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil) } func podDesc(namespace, name string, id kubecontainer.ContainerID) string { return fmt.Sprintf("%s_%s/%s", namespace, name, id.ID) } -func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (cnitypes.Result, error) { +func (plugin *cniNetworkPlugin) addToNetwork(ctx context.Context, network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (cnitypes.Result, error) { rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, options) if err != nil { klog.Errorf("Error adding network when building cni runtime conf: %v", err) @@ -353,7 +359,7 @@ func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, network.CNIConfig klog.V(4).Infof("Adding %s to network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) - res, err := cniNet.AddNetworkList(context.TODO(), netConf, rt) + res, err := cniNet.AddNetworkList(ctx, netConf, rt) if err != nil { klog.Errorf("Error adding %s to network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) return nil, err @@ -362,7 +368,7 @@ func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string return res, nil } -func (plugin *cniNetworkPlugin) 
deleteFromNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) error { +func (plugin *cniNetworkPlugin) deleteFromNetwork(ctx context.Context, network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) error { rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, nil) if err != nil { klog.Errorf("Error deleting network when building cni runtime conf: %v", err) @@ -372,7 +378,7 @@ func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName s pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, network.CNIConfig klog.V(4).Infof("Deleting %s from network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) - err = cniNet.DelNetworkList(context.TODO(), netConf, rt) + err = cniNet.DelNetworkList(ctx, netConf, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. if err != nil && !strings.Contains(err.Error(), "no such file or directory") { @@ -456,3 +462,13 @@ func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string return rt, nil } + +func maxStringLengthInLog(length int) int { + // we allow no more than 4096-length strings to be logged + const maxStringLength = 4096 + + if length < maxStringLength { + return length + } + return maxStringLength +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go index 6a75ef1e3cd..55487454310 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go @@ -74,6 +74,10 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin return nil, err } + if len(ips) == 0 { + return nil, fmt.Errorf("cannot find pod IPs in the network namespace, skipping pod network status for container %q", id) + } + return &network.PodNetworkStatus{ IP: ips[0], IPs: ips, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go index e39e878e1ce..316d4df9b99 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go @@ -19,7 +19,9 @@ limitations under the License. 
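Several hunks above replace context.TODO() with a context bounded by network.CNITimeoutSec when calling the CNI plugin, so a hung ADD/DEL cannot outlive the CRI request (the in-tree comments note the remote runtime default is 4 minutes, and the TODOs note the timeout should eventually come from the parent context). The generic shape of that change is sketched below; the timeout constant is a placeholder, since the real value of CNITimeoutSec is not shown in this patch.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// cniTimeout is a placeholder standing in for network.CNITimeoutSec * time.Second.
const cniTimeout = 30 * time.Second

// withCNITimeout bounds a CNI operation the way the hunks above bound
// AddNetworkList/DelNetworkList: derive a context, always cancel it, and let
// the operation observe ctx.Done().
func withCNITimeout(op func(ctx context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), cniTimeout)
	defer cancel()
	return op(ctx)
}

func main() {
	err := withCNITimeout(func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // stand-in for the CNI call
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println("CNI ADD finished, timed out:", errors.Is(err, context.DeadlineExceeded))
}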
package cni import ( + "context" "fmt" + "time" cniTypes020 "github.com/containernetworking/cni/pkg/types/020" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" @@ -43,8 +45,15 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) } - result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil, nil) + if plugin.getDefaultNetwork() == nil { + return nil, fmt.Errorf("CNI network not yet initialized, skipping pod network status for container %q", id) + } + // Because the default remote runtime request timeout is 4 min,so set slightly less than 240 seconds + // Todo get the timeout from parent ctx + cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second) + defer cancelFunc() + result, err := plugin.addToNetwork(cniTimeoutCtx, plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil, nil) klog.V(5).Infof("GetPodNetworkStatus result %+v", result) if err != nil { klog.Errorf("error while adding to cni network: %s", err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/fake_iptables.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/fake_iptables.go index 3b7dc695116..0c96e7a78b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/fake_iptables.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/fake_iptables.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "strings" + "time" "k8s.io/apimachinery/pkg/util/sets" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -39,6 +40,7 @@ type fakeTable struct { type fakeIPTables struct { tables map[string]*fakeTable builtinChains map[string]sets.String + ipv6 bool } func NewFakeIPTables() *fakeIPTables { @@ -49,6 +51,7 @@ func NewFakeIPTables() *fakeIPTables { string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"), string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"), }, + ipv6: false, } } @@ -221,7 +224,7 @@ func (f *fakeIPTables) DeleteRule(tableName utiliptables.Table, chainName utilip } func (f *fakeIPTables) IsIpv6() bool { - return false + return f.ipv6 } func saveChain(chain *fakeChain, data *bytes.Buffer) { @@ -335,10 +338,7 @@ func (f *fakeIPTables) RestoreAll(data []byte, flush utiliptables.FlushFlag, cou return f.restore("", data, flush) } -func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) { -} - -func (f *fakeIPTables) Destroy() { +func (f *fakeIPTables) Monitor(canary utiliptables.Chain, tables []utiliptables.Table, reloadFunc func(), interval time.Duration, stopCh <-chan struct{}) { } func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go index f5f12fe3a83..3c01b6e866d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go @@ -23,7 +23,7 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ 
-136,7 +136,11 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName } if natInterfaceName != "" && natInterfaceName != "lo" { // Need to SNAT traffic from localhost - args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"} + localhost := "127.0.0.0/8" + if iptables.IsIpv6() { + localhost = "::1/128" + } + args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", localhost, "-j", "MASQUERADE"} if _, err := iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { return fmt.Errorf("failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go index 1f0bec2a96b..9d1286aaaf2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go @@ -21,11 +21,12 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "net" "strconv" "strings" "sync" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" @@ -82,10 +83,16 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt return nil } - if podPortMapping.IP.To4() == nil { + // IP.To16() returns nil if IP is not a valid IPv4 or IPv6 address + if podPortMapping.IP.To16() == nil { return fmt.Errorf("invalid or missing IP of pod %s", podFullName) } podIP := podPortMapping.IP.String() + isIpv6 := utilnet.IsIPv6(podPortMapping.IP) + + if isIpv6 != hm.iptables.IsIpv6() { + return fmt.Errorf("HostPortManager IP family mismatch: %v, isIPv6 - %v", podIP, isIpv6) + } if err = ensureKubeHostportChains(hm.iptables, natInterfaceName); err != nil { return err @@ -142,10 +149,11 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt "-j", string(iptablesproxy.KubeMarkMasqChain)) // DNAT to the podIP:containerPort + hostPortBinding := net.JoinHostPort(podIP, strconv.Itoa(int(pm.ContainerPort))) writeLine(natRules, "-A", string(chain), "-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort), "-m", protocol, "-p", protocol, - "-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", podIP, pm.ContainerPort)) + "-j", "DNAT", fmt.Sprintf("--to-destination=%s", hostPortBinding)) } // getHostportChain should be able to provide unique hostport chain name using hash @@ -166,7 +174,6 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt // clean up opened host port if encounter any error return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)}) } - isIpv6 := utilnet.IsIPv6(podPortMapping.IP) // Remove conntrack entries just after adding the new iptables rules. 
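The hostport hunks above swap fmt.Sprintf("%s:%d", podIP, port) for net.JoinHostPort when building the DNAT --to-destination argument; the difference only matters for IPv6, where the address must be bracketed so the port is unambiguous. A two-line illustration:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	podIP, port := "2001:db8::2", int32(8080)

	// Old form: ambiguous for IPv6 (the final colon group blends into the port).
	fmt.Printf("--to-destination=%s:%d\n", podIP, port) // --to-destination=2001:db8::2:8080

	// New form: net.JoinHostPort brackets IPv6 hosts.
	fmt.Println("--to-destination=" + net.JoinHostPort(podIP, strconv.Itoa(int(port)))) // --to-destination=[2001:db8::2]:8080
}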
If the conntrack entry is removed along with // the IP tables rule, it can be the case that the packets received by the node after iptables rule removal will diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go index 002a0f441d3..2d8b1bf4368 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go @@ -21,15 +21,17 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "net" "strconv" "strings" "time" "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables" + utilnet "k8s.io/utils/net" ) // HostportSyncer takes a list of PodPortMappings and implements hostport all at once @@ -118,12 +120,18 @@ func getPodFullName(pod *PodPortMapping) string { // gatherAllHostports returns all hostports that should be presented on node, // given the list of pods running on that node and ignoring host network -// pods (which don't need hostport <-> container port mapping). -func gatherAllHostports(activePodPortMappings []*PodPortMapping) (map[*PortMapping]targetPod, error) { +// pods (which don't need hostport <-> container port mapping) +// It only returns the hosports that match the IP family passed as parameter +func gatherAllHostports(activePodPortMappings []*PodPortMapping, isIPv6 bool) (map[*PortMapping]targetPod, error) { podHostportMap := make(map[*PortMapping]targetPod) for _, pm := range activePodPortMappings { - if pm.IP.To4() == nil { - return nil, fmt.Errorf("invalid or missing pod %s IP", getPodFullName(pm)) + // IP.To16() returns nil if IP is not a valid IPv4 or IPv6 address + if pm.IP.To16() == nil { + return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm)) + } + // return only entries from the same IP family + if utilnet.IsIPv6(pm.IP) != isIPv6 { + continue } // should not handle hostports for hostnetwork pods if pm.HostNetwork { @@ -191,7 +199,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap klog.V(4).Infof("syncHostportsRules took %v", time.Since(start)) }() - hostportPodMap, err := gatherAllHostports(activePodPortMappings) + hostportPodMap, err := gatherAllHostports(activePodPortMappings, h.iptables.IsIpv6()) if err != nil { return err } @@ -227,6 +235,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap for port, target := range hostportPodMap { protocol := strings.ToLower(string(port.Protocol)) hostportChain := hostportChainName(port, target.podFullName) + if chain, ok := existingNATChains[hostportChain]; ok { writeBytesLine(natChains, chain) } else { @@ -256,11 +265,12 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap // Create hostport chain to DNAT traffic to final destination // IPTables will maintained the stats for this chain + hostPortBinding := net.JoinHostPort(target.podIP, strconv.Itoa(int(port.ContainerPort))) args = []string{ "-A", string(hostportChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort), "-m", protocol, "-p", protocol, - "-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", target.podIP, port.ContainerPort), + "-j", "DNAT", fmt.Sprintf("--to-destination=%s", 
hostPortBinding), } writeLine(natRules, args...) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD index f42ee784d77..358c95116d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD @@ -16,9 +16,27 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet", deps = select({ "@io_bazel_rules_go//go/platform:android": [ + "//pkg/features:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim/network:go_default_library", + "//pkg/kubelet/dockershim/network/hostport:go_default_library", + "//pkg/util/bandwidth:go_default_library", + "//pkg/util/ebtables:go_default_library", + "//pkg/util/iptables:go_default_library", + "//pkg/util/sysctl:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/github.com/containernetworking/cni/libcni:go_default_library", + "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", + "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", + "//vendor/github.com/vishvananda/netlink:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/kubelet/apis/config:go_default_library", @@ -35,6 +53,11 @@ go_library( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim/network:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/kubelet/apis/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/dockershim/network:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/features:go_default_library", "//pkg/kubelet/apis/config:go_default_library", @@ -42,7 +65,6 @@ go_library( "//pkg/kubelet/dockershim/network:go_default_library", "//pkg/kubelet/dockershim/network/hostport:go_default_library", "//pkg/util/bandwidth:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/ebtables:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/sysctl:go_default_library", @@ -98,6 +120,24 @@ go_test( srcs = ["kubenet_linux_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/apis/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/dockershim/network:go_default_library", + "//pkg/kubelet/dockershim/network/cni/testing:go_default_library", + "//pkg/kubelet/dockershim/network/hostport/testing:go_default_library", + "//pkg/kubelet/dockershim/network/testing:go_default_library", + "//pkg/util/bandwidth:go_default_library", + "//pkg/util/iptables/testing:go_default_library", + "//pkg/util/sysctl/testing:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + 
"//vendor/github.com/containernetworking/cni/libcni:go_default_library", + "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/mock:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 8e8dc4accac..263dc01b78a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -41,7 +41,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/dockershim/network" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport" "k8s.io/kubernetes/pkg/util/bandwidth" - utildbus "k8s.io/kubernetes/pkg/util/dbus" utilebtables "k8s.io/kubernetes/pkg/util/ebtables" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" @@ -80,10 +79,7 @@ const ( "ipam": { "type": "host-local", "ranges": [%s], - "routes": [ - { "dst": "%s" }, - { "dst": "%s" } - ] + "routes": [%s] } }` ) @@ -108,12 +104,14 @@ type kubenetNetworkPlugin struct { // kubenet can use either hostportSyncer and hostportManager to implement hostports // Currently, if network host supports legacy features, hostportSyncer will be used, // otherwise, hostportManager will be used. - hostportSyncer hostport.HostportSyncer - hostportManager hostport.HostPortManager - iptables utiliptables.Interface - iptablesv6 utiliptables.Interface - sysctl utilsysctl.Interface - ebtables utilebtables.Interface + hostportSyncer hostport.HostportSyncer + hostportSyncerv6 hostport.HostportSyncer + hostportManager hostport.HostPortManager + hostportManagerv6 hostport.HostPortManager + iptables utiliptables.Interface + iptablesv6 utiliptables.Interface + sysctl utilsysctl.Interface + ebtables utilebtables.Interface // binDirs is passed by kubelet cni-bin-dir parameter. // kubenet will search for CNI binaries in DefaultCNIDir first, then continue to binDirs. 
binDirs []string @@ -125,9 +123,8 @@ type kubenetNetworkPlugin struct { func NewPlugin(networkPluginDirs []string, cacheDir string) network.NetworkPlugin { execer := utilexec.New() - dbus := utildbus.New() - iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4) - iptInterfacev6 := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv6) + iptInterface := utiliptables.New(execer, utiliptables.ProtocolIpv4) + iptInterfacev6 := utiliptables.New(execer, utiliptables.ProtocolIpv6) return &kubenetNetworkPlugin{ podIPs: make(map[kubecontainer.ContainerID]utilsets.String), execer: utilexec.New(), @@ -136,7 +133,9 @@ func NewPlugin(networkPluginDirs []string, cacheDir string) network.NetworkPlugi sysctl: utilsysctl.New(), binDirs: append([]string{DefaultCNIDir}, networkPluginDirs...), hostportSyncer: hostport.NewHostportSyncer(iptInterface), + hostportSyncerv6: hostport.NewHostportSyncer(iptInterfacev6), hostportManager: hostport.NewHostportManager(iptInterface), + hostportManagerv6: hostport.NewHostportManager(iptInterfacev6), nonMasqueradeCIDR: "10.0.0.0/8", cacheDir: cacheDir, podCIDRs: make([]*net.IPNet, 0), @@ -283,7 +282,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf //setup hairpinMode setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth - json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), zeroCIDRv4, zeroCIDRv6) + json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig()) klog.V(4).Infof("CNI network config set to %v", json) plugin.netConfig, err = libcni.ConfFromBytes([]byte(json)) if err != nil { @@ -459,8 +458,14 @@ func (plugin *kubenetNetworkPlugin) addPortMapping(id kubecontainer.ContainerID, IP: net.ParseIP(ip), HostNetwork: false, } - if err := plugin.hostportManager.Add(id.ID, pm, BridgeName); err != nil { - return err + if netutils.IsIPv6(pm.IP) { + if err := plugin.hostportManagerv6.Add(id.ID, pm, BridgeName); err != nil { + return err + } + } else { + if err := plugin.hostportManager.Add(id.ID, pm, BridgeName); err != nil { + return err + } } } @@ -502,37 +507,57 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k // Loopback network deletion failure should not be fatal on teardown if err := plugin.delContainerFromNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil { klog.Warningf("Failed to delete loopback network: %v", err) + errList = append(errList, err) + } // no ip dependent actions if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil { + klog.Warningf("Failed to delete %q network: %v", network.DefaultInterfaceName, err) errList = append(errList, err) } - portMappings, err := plugin.host.GetPodPortMappings(id.ID) - if err != nil { - errList = append(errList, err) - } else if len(portMappings) > 0 { - if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{ - Namespace: namespace, - Name: name, - PortMappings: portMappings, - HostNetwork: false, - }); err != nil { - errList = append(errList, err) - } - } - + // If there are no IPs registered we can't teardown pod's IP dependencies iplist, exists := plugin.getCachedPodIPs(id) if !exists || len(iplist) == 0 { klog.V(5).Infof("container %s (%s/%s) does not have recorded. 
ignoring teardown call", id, name, namespace) return nil } + // get the list of port mappings + portMappings, err := plugin.host.GetPodPortMappings(id.ID) + if err != nil { + errList = append(errList, err) + } + + // process each pod IP for _, ip := range iplist { + isV6 := netutils.IsIPv6String(ip) + klog.V(5).Infof("Removing pod port mappings from IP %s", ip) + if portMappings != nil && len(portMappings) > 0 { + if isV6 { + if err = plugin.hostportManagerv6.Remove(id.ID, &hostport.PodPortMapping{ + Namespace: namespace, + Name: name, + PortMappings: portMappings, + HostNetwork: false, + }); err != nil { + errList = append(errList, err) + } + } else { + if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{ + Namespace: namespace, + Name: name, + PortMappings: portMappings, + HostNetwork: false, + }); err != nil { + errList = append(errList, err) + } + } + } + klog.V(5).Infof("Removing pod IP %s from shaper for (%s/%s)", ip, name, namespace) // shaper uses a cidr, but we are using a single IP. - isV6 := netutils.IsIPv6String(ip) mask := "32" if isV6 { mask = "128" @@ -692,8 +717,11 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network } klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) - - res, err := plugin.cniConfig.AddNetwork(context.TODO(), config, rt) + // Because the default remote runtime request timeout is 4 min,so set slightly less than 240 seconds + // Todo get the timeout from parent ctx + cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second) + defer cancelFunc() + res, err := plugin.cniConfig.AddNetwork(cniTimeoutCtx, config, rt) if err != nil { return nil, fmt.Errorf("error adding container to network: %v", err) } @@ -707,7 +735,11 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo } klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) - err = plugin.cniConfig.DelNetwork(context.TODO(), config, rt) + // Because the default remote runtime request timeout is 4 min,so set slightly less than 240 seconds + // Todo get the timeout from parent ctx + cniTimeoutCtx, cancelFunc := context.WithTimeout(context.Background(), network.CNITimeoutSec*time.Second) + defer cancelFunc() + err = plugin.cniConfig.DelNetwork(cniTimeoutCtx, config, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. 
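addContainerToNetwork and delContainerFromNetwork above now run their CNI calls under context.WithTimeout, sized to stay below the 4-minute remote runtime request timeout mentioned in the comment. A minimal sketch of that pattern; addNetwork is an illustrative stand-in, not the libcni API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cniTimeout is illustrative; like the change above it stays below the
// assumed 240-second caller-side deadline.
const cniTimeout = 220 * time.Second

// addNetwork stands in for a CNI AddNetwork call: it aborts as soon as
// the context is cancelled or times out instead of running unbounded.
func addNetwork(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend CNI work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), cniTimeout)
	defer cancel()
	if err := addNetwork(ctx); err != nil {
		fmt.Println("error adding container to network:", err)
		return
	}
	fmt.Println("network added before the deadline")
}
```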
if err != nil && !strings.Contains(err.Error(), "no such file or directory") { @@ -844,6 +876,29 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string { return strings.Join(ranges[:], ",") } +// given a n cidrs assigned to nodes, +// create bridge routes configuration that conforms to them +func (plugin *kubenetNetworkPlugin) getRoutesConfig() string { + var ( + routes []string + hasV4, hasV6 bool + ) + for _, thisCIDR := range plugin.podCIDRs { + if thisCIDR.IP.To4() != nil { + hasV4 = true + } else { + hasV6 = true + } + } + if hasV4 { + routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv4)) + } + if hasV6 { + routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv6)) + } + return strings.Join(routes, ",") +} + func (plugin *kubenetNetworkPlugin) addPodIP(id kubecontainer.ContainerID, ip string) { plugin.mu.Lock() defer plugin.mu.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/BUILD index dabd4dee307..4d10bf852b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/metrics.go index 8ecd02132c3..9e6f635425b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics/metrics.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -46,7 +44,7 @@ var ( Subsystem: kubeletSubsystem, Name: NetworkPluginOperationsLatencyKey, Help: "Latency in seconds of network plugin operations. Broken down by operation type.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"operation_type"}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/network.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/network.go index 217fc762525..fbd1ba12550 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/network.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/network.go @@ -19,6 +19,9 @@ package network // TODO: Consider making this value configurable. const DefaultInterfaceName = "eth0" +// CNITimeoutSec is set to be slightly less than 240sec/4mins, which is the default remote runtime request timeout. 
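getRoutesConfig above emits one default route per IP family that actually appears in the node's pod CIDRs, which is what fills the new "routes": [%s] slot in the CNI template. A runnable sketch of the same idea; the zero-CIDR literals are written out as the conventional 0.0.0.0/0 and ::/0, assumed values of zeroCIDRv4/zeroCIDRv6:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// routesConfig mirrors the shape of getRoutesConfig: emit a default route
// for each IP family present among the pod CIDRs.
func routesConfig(podCIDRs []*net.IPNet) string {
	var hasV4, hasV6 bool
	for _, cidr := range podCIDRs {
		if cidr.IP.To4() != nil {
			hasV4 = true
		} else {
			hasV6 = true
		}
	}
	var routes []string
	if hasV4 {
		routes = append(routes, `{"dst": "0.0.0.0/0"}`)
	}
	if hasV6 {
		routes = append(routes, `{"dst": "::/0"}`)
	}
	return strings.Join(routes, ",")
}

func main() {
	var cidrs []*net.IPNet
	for _, s := range []string{"10.244.1.0/24", "fd00:1::/64"} {
		_, n, err := net.ParseCIDR(s)
		if err != nil {
			panic(err)
		}
		cidrs = append(cidrs, n)
	}
	fmt.Printf(`"routes": [%s]`+"\n", routesConfig(cidrs))
}
```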
+const CNITimeoutSec = 220 + // UseDefaultMTU is a marker value that indicates the plugin should determine its own MTU // It is the zero value, so a non-initialized value will mean "UseDefault" const UseDefaultMTU = 0 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go index 4240f67611a..0daa7d8398b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go @@ -271,18 +271,20 @@ func GetPodIPs(execer utilexec.Interface, nsenterPath, netnsPath, interfaceName return []net.IP{ip}, nil } - list := make([]net.IP, 0) - var err4, err6 error - if ipv4, err4 := getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-4"); err4 != nil { - list = append(list, ipv4) - } - - if ipv6, err6 := getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-6"); err6 != nil { - list = append(list, ipv6) + var ( + list []net.IP + errs []error + ) + for _, addrType := range []string{"-4", "-6"} { + if ip, err := getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, addrType); err == nil { + list = append(list, ip) + } else { + errs = append(errs, err) + } } if len(list) == 0 { - return nil, utilerrors.NewAggregate([]error{err4, err6}) + return nil, utilerrors.NewAggregate(errs) } return list, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD index ef281bfc6fc..a065ea173f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD @@ -48,6 +48,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/eviction", deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/resource:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", @@ -60,8 +61,6 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/util:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -71,6 +70,9 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/klog:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 4ef2a89dce6..10d4a2c852a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -40,7 +40,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server/stats" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) const ( @@ -137,20 +136,21 @@ func (m 
*managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd if kubelettypes.IsCriticalPod(attrs.Pod) { return lifecycle.PodAdmitResult{Admit: true} } - // the node has memory pressure, admit if not best-effort - if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) { + + // Conditions other than memory pressure reject all pods + nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1 + if nodeOnlyHasMemoryPressureCondition { notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod) if notBestEffort { return lifecycle.PodAdmitResult{Admit: true} } - // When node has memory pressure and TaintNodesByCondition is enabled, check BestEffort Pod's toleration: + // When node has memory pressure, check BestEffort Pod's toleration: // admit it if tolerates memory pressure taint, fail for other tolerations, e.g. DiskPressure. - if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) && - v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{ - Key: schedulerapi.TaintNodeMemoryPressure, - Effect: v1.TaintEffectNoSchedule, - }) { + if v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{ + Key: v1.TaintNodeMemoryPressure, + Effect: v1.TaintEffectNoSchedule, + }) { return lifecycle.PodAdmitResult{Admit: true} } } @@ -292,7 +292,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met) thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now) - debugLogThresholdsWithObservation("thresholds - grace periods satisified", thresholds, observations) + debugLogThresholdsWithObservation("thresholds - grace periods satisfied", thresholds, observations) // update internal state m.Lock() @@ -547,19 +547,9 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // If the pod is marked as critical and static, and support for critical pod annotations is enabled, // do not evict such pods. Static pods are not re-admitted after evictions. // https://github.com/kubernetes/kubernetes/issues/40573 has more details. 
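The Admit change above only relaxes admission when memory pressure is the sole node condition: non-BestEffort pods are admitted, and BestEffort pods only if they tolerate the memory-pressure taint; any other condition keeps rejecting. A toy sketch of that decision with simplified stand-in types, not the kubelet API:

```go
package main

import "fmt"

// condition and qosClass are stand-ins for the node condition and QoS types.
type condition string
type qosClass string

const (
	memoryPressure condition = "MemoryPressure"
	bestEffort     qosClass  = "BestEffort"
)

type pod struct {
	qos                     qosClass
	toleratesMemoryPressure bool
}

// admit mirrors the shape of the new logic: only when memory pressure is
// the single condition are non-BestEffort pods admitted, or BestEffort
// pods that tolerate the memory-pressure taint.
func admit(conditions []condition, p pod) bool {
	onlyMemoryPressure := len(conditions) == 1 && conditions[0] == memoryPressure
	if !onlyMemoryPressure {
		return len(conditions) == 0
	}
	if p.qos != bestEffort {
		return true
	}
	return p.toleratesMemoryPressure
}

func main() {
	conds := []condition{memoryPressure}
	fmt.Println(admit(conds, pod{qos: "Burstable"}))                               // true
	fmt.Println(admit(conds, pod{qos: bestEffort}))                                // false
	fmt.Println(admit(conds, pod{qos: bestEffort, toleratesMemoryPressure: true})) // true
}
```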
- if kubelettypes.IsStaticPod(pod) { - // need mirrorPod to check its "priority" value; static pod doesn't carry it - if mirrorPod, ok := m.mirrorPodFunc(pod); ok && mirrorPod != nil { - // skip only when it's a static and critical pod - if kubelettypes.IsCriticalPod(mirrorPod) { - klog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) - return false - } - } else { - // we should never hit this - klog.Errorf("eviction manager: cannot get mirror pod from static pod %s, so cannot evict it", format.Pod(pod)) - return false - } + if kubelettypes.IsCriticalPod(pod) { + klog.Errorf("eviction manager: cannot evict a critical pod %s", format.Pod(pod)) + return false } status := v1.PodStatus{ Phase: v1.PodFailed, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go index dfdb8ce3b60..65a00b32951 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -23,14 +23,14 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog" + "k8s.io/kubernetes/pkg/api/v1/pod" v1resource "k8s.io/kubernetes/pkg/api/v1/resource" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - schedulerutils "k8s.io/kubernetes/pkg/scheduler/util" volumeutils "k8s.io/kubernetes/pkg/volume/util" ) @@ -329,7 +329,7 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity { } // localVolumeNames returns the set of volumes for the pod that are local -// TODO: sumamry API should report what volumes consume local storage rather than hard-code here. +// TODO: summary API should report what volumes consume local storage rather than hard-code here. func localVolumeNames(pod *v1.Pod) []string { result := []string{} for _, volume := range pod.Spec.Volumes { @@ -512,8 +512,8 @@ func (ms *multiSorter) Less(i, j int) bool { // priority compares pods by Priority, if priority is enabled. func priority(p1, p2 *v1.Pod) int { - priority1 := schedulerutils.GetPodPriority(p1) - priority2 := schedulerutils.GetPodPriority(p2) + priority1 := pod.GetPodPriority(p1) + priority2 := pod.GetPodPriority(p2) if priority1 == priority2 { return 0 } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go index 8ac1ac6cf25..7ca6704d2db 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go @@ -133,7 +133,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { } // wait waits up to notifierRefreshInterval for an event on the Epoll FD for the -// eventfd we are concerned about. It returns an error if one occurrs, and true +// eventfd we are concerned about. It returns an error if one occurs, and true // if the consumer should read from the eventfd. 
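Eviction ranking now takes pod priority from pkg/api/v1/pod rather than the scheduler utilities; the multiSorter still needs a three-way comparison. A sketch of that comparator over a stand-in pod type:

```go
package main

import (
	"fmt"
	"sort"
)

// pod is a stand-in carrying only the priority value the comparison needs.
type pod struct {
	name     string
	priority int32
}

// byPriority mirrors the three-way comparison used by the eviction
// multiSorter: negative means p1 sorts (and is considered for eviction)
// before p2.
func byPriority(p1, p2 pod) int {
	if p1.priority == p2.priority {
		return 0
	}
	if p1.priority < p2.priority {
		return -1
	}
	return 1
}

func main() {
	pods := []pod{{"high", 1000}, {"low", 0}, {"mid", 100}}
	sort.Slice(pods, func(i, j int) bool { return byPriority(pods[i], pods[j]) < 0 })
	fmt.Println(pods) // [{low 0} {mid 100} {high 1000}]
}
```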
func wait(epfd, eventfd int, timeout time.Duration) (bool, error) { events := make([]unix.EpollEvent, numFdEvents+1) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index c2acd358e59..f77ad998876 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -33,8 +33,12 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/integer" + "k8s.io/utils/mount" + utilnet "k8s.io/utils/net" + v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -54,10 +58,10 @@ import ( cloudprovider "k8s.io/cloud-provider" internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/klog" + pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" - pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" @@ -106,12 +110,9 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/volumemanager" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/security/apparmor" sysctlwhitelist "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl" - utildbus "k8s.io/kubernetes/pkg/util/dbus" utilipt "k8s.io/kubernetes/pkg/util/iptables" - "k8s.io/kubernetes/pkg/util/mount" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/selinux" @@ -120,8 +121,6 @@ import ( "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/subpath" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilexec "k8s.io/utils/exec" - "k8s.io/utils/integer" ) const ( @@ -459,7 +458,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0) go r.Run(wait.NeverStop) } - nodeInfo := &CachedNodeInfo{NodeLister: corelisters.NewNodeLister(nodeIndexer)} + nodeLister := corelisters.NewNodeLister(nodeIndexer) // TODO: get the real node object of ourself, // and use the real node name and UID. 
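NewMainKubelet now feeds a reflector into a node indexer and hands the kubelet a plain corelisters.NodeLister instead of the CachedNodeInfo wrapper removed further down. A rough sketch of the cache-backed Get semantics such a lister provides, using a plain map instead of client-go (all names here are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// node stands in for *v1.Node; only the name matters for the sketch.
type node struct{ name string }

// nodeCache imitates the lister-over-watch-cache shape: a background
// watcher (not shown) keeps the map current, readers call Get by name.
type nodeCache struct {
	mu    sync.RWMutex
	nodes map[string]*node
}

func newNodeCache() *nodeCache { return &nodeCache{nodes: map[string]*node{}} }

func (c *nodeCache) upsert(n *node) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.nodes[n.name] = n
}

// Get mirrors lister semantics: a cached read that errors when absent.
func (c *nodeCache) Get(name string) (*node, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	n, ok := c.nodes[name]
	if !ok {
		return nil, fmt.Errorf("node %q not found", name)
	}
	return n, nil
}

func main() {
	c := newNodeCache()
	c.upsert(&node{name: "worker-0"})
	if n, err := c.Get("worker-0"); err == nil {
		fmt.Println("got", n.name)
	}
}
```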
@@ -487,7 +486,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, httpClient := &http.Client{} parsedNodeIP := net.ParseIP(nodeIP) protocol := utilipt.ProtocolIpv4 - if parsedNodeIP != nil && parsedNodeIP.To4() == nil { + if utilnet.IsIPv6(parsedNodeIP) { klog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP) protocol = utilipt.ProtocolIpv6 } @@ -507,7 +506,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, registerSchedulable: registerSchedulable, dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), serviceLister: serviceLister, - nodeInfo: nodeInfo, + nodeLister: nodeLister, masterServiceNamespace: masterServiceNamespace, streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, recorder: kubeDeps.Recorder, @@ -537,7 +536,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeIPValidator: validateNodeIP, clock: clock.RealClock{}, enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, - iptClient: utilipt.New(utilexec.New(), utildbus.New(), protocol), + iptClient: utilipt.New(utilexec.New(), protocol), makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, iptablesMasqueradeBit: int(kubeCfg.IPTablesMasqueradeBit), iptablesDropBit: int(kubeCfg.IPTablesDropBit), @@ -584,6 +583,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff) klet.livenessManager = proberesults.NewManager() + klet.startupManager = proberesults.NewManager() klet.podCache = kubecontainer.NewCache() var checkpointManager checkpointmanager.CheckpointManager @@ -594,7 +594,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } } // podManager is also responsible for keeping secretManager and configMapManager contents up-to-date. 
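The protocol selection above now goes through utilnet.IsIPv6 when choosing which iptables protocol the kubelet's iptClient should use. A sketch of that selection rule with stand-in protocol constants (the real ones live in pkg/util/iptables), assuming IsIPv6 treats any parsed address without a 4-byte form as IPv6:

```go
package main

import (
	"fmt"
	"net"
)

// protocol stands in for the utiliptables Protocol type.
type protocol string

const (
	protocolIPv4 protocol = "IPv4"
	protocolIPv6 protocol = "IPv6"
)

// protocolForNodeIP follows the same rule as the change above: an address
// that parses but has no 4-byte form is treated as IPv6.
func protocolForNodeIP(nodeIP string) protocol {
	ip := net.ParseIP(nodeIP)
	if ip != nil && ip.To4() == nil {
		return protocolIPv6
	}
	return protocolIPv4
}

func main() {
	fmt.Println(protocolForNodeIP("192.0.2.10"))   // IPv4
	fmt.Println(protocolForNodeIP("2001:db8::10")) // IPv6
}
```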
- klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager, configMapManager, checkpointManager) + mirrorPodClient := kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister) + klet.podManager = kubepod.NewBasicPodManager(mirrorPodClient, secretManager, configMapManager, checkpointManager) klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet) @@ -672,6 +673,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, runtime, err := kuberuntime.NewKubeGenericRuntimeManager( kubecontainer.FilterEventRecorder(kubeDeps.Recorder), klet.livenessManager, + klet.startupManager, seccompProfileRoot, containerRefManager, machineInfo, @@ -778,6 +780,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.probeManager = prober.NewManager( klet.statusManager, klet.livenessManager, + klet.startupManager, klet.runner, containerRefManager, kubeDeps.Recorder) @@ -794,7 +797,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } klet.pluginManager = pluginmanager.NewPluginManager( klet.getPluginsRegistrationDir(), /* sockDir */ - klet.getPluginsDir(), /* deprecatedSockDir */ kubeDeps.Recorder, ) @@ -875,13 +877,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.appArmorValidator = apparmor.NewValidator(containerRuntime) klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator)) klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime)) - - if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) { - klet.nodeLeaseController = nodelease.NewController(klet.clock, klet.heartbeatClient, string(klet.nodeName), kubeCfg.NodeLeaseDurationSeconds, klet.onRepeatedHeartbeatFailure) - } - klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewProcMountAdmitHandler(klet.containerRuntime)) + klet.nodeLeaseController = nodelease.NewController(klet.clock, klet.heartbeatClient, string(klet.nodeName), kubeCfg.NodeLeaseDurationSeconds, klet.onRepeatedHeartbeatFailure) + // Finally, put the most recent version of the config on the Kubelet, so // people can see how it was configured. klet.kubeletConfiguration = *kubeCfg @@ -960,8 +959,8 @@ type Kubelet struct { masterServiceNamespace string // serviceLister knows how to list services serviceLister serviceLister - // nodeInfo knows how to get information about the node for this kubelet. - nodeInfo predicates.NodeInfo + // nodeLister knows how to list nodes + nodeLister corelisters.NodeLister // a list of node labels to register nodeLabels map[string]string @@ -977,6 +976,7 @@ type Kubelet struct { probeManager prober.Manager // Manages container health check results. 
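Alongside the liveness results manager, the kubelet now constructs a startup probe results manager and threads it into the runtime manager and the prober. A small sketch of the results-cache shape such a manager provides, using stand-in types rather than the proberesults API:

```go
package main

import (
	"fmt"
	"sync"
)

// result stands in for a probe result value.
type result int

const (
	unknown result = iota
	success
	failure
)

// resultsManager is a minimal cache in the spirit of a probe results
// manager: probe workers Set results, pod workers Get them.
type resultsManager struct {
	mu    sync.RWMutex
	cache map[string]result // keyed by container ID in this sketch
}

func newResultsManager() *resultsManager {
	return &resultsManager{cache: map[string]result{}}
}

func (m *resultsManager) Set(containerID string, r result) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.cache[containerID] = r
}

func (m *resultsManager) Get(containerID string) (result, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	r, ok := m.cache[containerID]
	return r, ok
}

func main() {
	liveness, startup := newResultsManager(), newResultsManager()
	startup.Set("c1", success) // container finished starting
	liveness.Set("c1", success)
	if r, ok := startup.Get("c1"); ok {
		fmt.Println("startup result:", r)
	}
}
```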
livenessManager proberesults.Manager + startupManager proberesults.Manager // How long to keep idle streaming command execution/port forwarding // connections open before terminating them @@ -1422,15 +1422,13 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { go kl.fastStatusUpdateOnce() // start syncing lease - if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) { - go kl.nodeLeaseController.Run(wait.NeverStop) - } + go kl.nodeLeaseController.Run(wait.NeverStop) } go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop) - // Start loop to sync iptables util rules + // Set up iptables util rules if kl.makeIPTablesUtilChains { - go wait.Until(kl.syncNetworkUtil, 1*time.Minute, wait.NeverStop) + kl.initNetworkUtil() } // Start a goroutine responsible for killing pods (that are not properly @@ -1834,9 +1832,16 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand factor = 2 ) duration := base + // Responsible for checking limits in resolv.conf + // The limits do not have anything to do with individual pods + // Since this is called in syncLoop, we don't need to call it anywhere else + if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" { + kl.dnsConfigurer.CheckLimitsForResolvConf() + } + for { if err := kl.runtimeState.runtimeErrors(); err != nil { - klog.Infof("skipping pod synchronization - %v", err) + klog.Errorf("skipping pod synchronization - %v", err) // exponential backoff time.Sleep(duration) duration = time.Duration(math.Min(float64(max), factor*float64(duration))) @@ -2040,11 +2045,6 @@ func (kl *Kubelet) handleMirrorPod(mirrorPod *v1.Pod, start time.Time) { func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { start := kl.clock.Now() sort.Sort(sliceutils.PodsByCreationTime(pods)) - // Responsible for checking limits in resolv.conf - // The limits do not have anything to do with individual pods - if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" { - kl.dnsConfigurer.CheckLimitsForResolvConf() - } for _, pod := range pods { existingPods := kl.podManager.GetPods() // Always add the pod to the pod manager. Kubelet relies on the pod @@ -2053,7 +2053,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { // the apiserver and no action (other than cleanup) is required. kl.podManager.AddPod(pod) - if kubepod.IsMirrorPod(pod) { + if kubetypes.IsMirrorPod(pod) { kl.handleMirrorPod(pod, start) continue } @@ -2082,13 +2082,9 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { // being updated from a config source. func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) { start := kl.clock.Now() - // Responsible for checking limits in resolv.conf - if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" { - kl.dnsConfigurer.CheckLimitsForResolvConf() - } for _, pod := range pods { kl.podManager.UpdatePod(pod) - if kubepod.IsMirrorPod(pod) { + if kubetypes.IsMirrorPod(pod) { kl.handleMirrorPod(pod, start) continue } @@ -2105,7 +2101,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { kl.podManager.DeletePod(pod) - if kubepod.IsMirrorPod(pod) { + if kubetypes.IsMirrorPod(pod) { kl.handleMirrorPod(pod, start) continue } @@ -2307,23 +2303,3 @@ func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kub } return config } - -// CachedNodeInfo implements NodeInfo -type CachedNodeInfo struct { - corelisters.NodeLister -} - -// GetNodeInfo returns cached data for the node name. 
-func (c *CachedNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { - node, err := c.Get(nodeName) - - if apierrors.IsNotFound(err) { - return nil, err - } - - if err != nil { - return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", nodeName, err) - } - - return node, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index 92ec4a9f6a4..1554972f9f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -17,6 +17,7 @@ limitations under the License. package kubelet import ( + "context" "fmt" "io/ioutil" "net" @@ -24,15 +25,15 @@ import ( cadvisorapiv1 "github.com/google/cadvisor/info/v1" "k8s.io/klog" + "k8s.io/utils/mount" + utilpath "k8s.io/utils/path" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/util/mount" utilnode "k8s.io/kubernetes/pkg/util/node" - utilpath "k8s.io/utils/path" ) // getRootDir returns the full path to the directory under which kubelet can @@ -225,9 +226,9 @@ func (kl *Kubelet) getRuntime() kubecontainer.Runtime { // GetNode returns the node info for the configured node name of this Kubelet. func (kl *Kubelet) GetNode() (*v1.Node, error) { if kl.kubeClient == nil { - return kl.initialNode() + return kl.initialNode(context.TODO()) } - return kl.nodeInfo.GetNodeInfo(string(kl.nodeName)) + return kl.nodeLister.Get(string(kl.nodeName)) } // getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates(). @@ -237,11 +238,11 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) { // zero capacity, and the default labels. func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) { if kl.kubeClient != nil { - if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil { + if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil { return n, nil } } - return kl.initialNode() + return kl.initialNode(context.TODO()) } // GetNodeConfig returns the container manager node config. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go index 1c9ad46b989..f3b82c94142 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go @@ -20,11 +20,20 @@ package kubelet import ( "fmt" + "time" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) +func (kl *Kubelet) initNetworkUtil() { + kl.syncNetworkUtil() + go kl.iptClient.Monitor(utiliptables.Chain("KUBE-KUBELET-CANARY"), + []utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter}, + kl.syncNetworkUtil, 1*time.Minute, wait.NeverStop) +} + // syncNetworkUtil ensures the network utility are present on host. // Network util includes: // 1. 
In nat table, KUBE-MARK-DROP rule to mark connections for dropping diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_others.go index 53267bfc5f6..bf0a3ba52fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_others.go @@ -19,4 +19,4 @@ limitations under the License. package kubelet // Do nothing. -func (kl *Kubelet) syncNetworkUtil() {} +func (kl *Kubelet) initNetworkUtil() {} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index 7e6fa2f12bc..5ed554c0fe8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -25,18 +25,16 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/nodestatus" @@ -63,7 +61,7 @@ func (kl *Kubelet) registerWithAPIServer() { step = 7 * time.Second } - node, err := kl.initialNode() + node, err := kl.initialNode(context.TODO()) if err != nil { klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) continue @@ -150,11 +148,15 @@ func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool { func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool { defaultLabels := []string{ v1.LabelHostname, + v1.LabelZoneFailureDomainStable, + v1.LabelZoneRegionStable, v1.LabelZoneFailureDomain, v1.LabelZoneRegion, + v1.LabelInstanceTypeStable, v1.LabelInstanceType, v1.LabelOSStable, v1.LabelArchStable, + v1.LabelWindowsBuild, kubeletapis.LabelOS, kubeletapis.LabelArch, } @@ -214,7 +216,7 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v // initialNode constructs the initial v1.Node for this Kubelet, incorporating node // labels, information from the cloud provider, and Kubelet configuration. 
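updateDefaultLabels above now also reconciles the stable topology and instance-type label keys, plus the Windows build label, from the freshly built initial node onto the existing node object. A sketch of that copy-and-report-changed loop over an illustrative subset of keys; the literal key strings below are assumptions, real code should use the v1.Label* constants:

```go
package main

import "fmt"

// defaultLabels is an illustrative subset of the label keys the kubelet
// reconciles, including the newer "stable" topology keys.
var defaultLabels = []string{
	"kubernetes.io/hostname",
	"topology.kubernetes.io/zone",
	"topology.kubernetes.io/region",
	"node.kubernetes.io/instance-type",
}

// updateDefaultLabels copies the listed labels from the desired set onto
// the existing one and reports whether anything changed, mirroring the
// shape of the kubelet helper of the same name.
func updateDefaultLabels(initial, existing map[string]string) bool {
	changed := false
	for _, key := range defaultLabels {
		want, ok := initial[key]
		if !ok {
			continue
		}
		if existing[key] != want {
			existing[key] = want
			changed = true
		}
	}
	return changed
}

func main() {
	initial := map[string]string{"topology.kubernetes.io/zone": "zone-a"}
	existing := map[string]string{}
	fmt.Println(updateDefaultLabels(initial, existing), existing)
}
```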
-func (kl *Kubelet) initialNode() (*v1.Node, error) { +func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: string(kl.nodeName), @@ -230,6 +232,14 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { Unschedulable: !kl.registerSchedulable, }, } + osLabels, err := getOSSpecificLabels() + if err != nil { + return nil, err + } + for label, value := range osLabels { + node.Labels[label] = value + } + nodeTaints := make([]v1.Taint, 0) if len(kl.registerWithTaints) > 0 { taints := make([]v1.Taint, len(kl.registerWithTaints)) @@ -242,17 +252,15 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { } unschedulableTaint := v1.Taint{ - Key: schedulerapi.TaintNodeUnschedulable, + Key: v1.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule, } - // If TaintNodesByCondition enabled, taint node with TaintNodeUnschedulable when initializing + // Taint node with TaintNodeUnschedulable when initializing // node to avoid race condition; refer to #63897 for more detail. - if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) { - if node.Spec.Unschedulable && - !taintutil.TaintExists(nodeTaints, &unschedulableTaint) { - nodeTaints = append(nodeTaints, unschedulableTaint) - } + if node.Spec.Unschedulable && + !taintutil.TaintExists(nodeTaints, &unschedulableTaint) { + nodeTaints = append(nodeTaints, unschedulableTaint) } if kl.externalCloudProvider { @@ -320,34 +328,40 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { // local metadata server here. var err error if node.Spec.ProviderID == "" { - node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(context.TODO(), kl.cloud, kl.nodeName) + node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(ctx, kl.cloud, kl.nodeName) if err != nil { return nil, err } } - instanceType, err := instances.InstanceType(context.TODO(), kl.nodeName) + instanceType, err := instances.InstanceType(ctx, kl.nodeName) if err != nil { return nil, err } if instanceType != "" { klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType) node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType) + node.ObjectMeta.Labels[v1.LabelInstanceTypeStable] = instanceType } // If the cloud has zone information, label the node with the zone information zones, ok := kl.cloud.Zones() if ok { - zone, err := zones.GetZone(context.TODO()) + zone, err := zones.GetZone(ctx) if err != nil { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain) node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain) + node.ObjectMeta.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain } if zone.Region != "" { klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region) node.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region) + node.ObjectMeta.Labels[v1.LabelZoneRegionStable] = zone.Region } } } @@ -430,7 +444,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { kl.setNodeStatus(node) now := kl.clock.Now() - if 
utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) && now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) { + if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) { if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) { // We must mark the volumes as ReportedInUse in volume manager's dsw even // if no changes were made to the node status (no volumes were added or removed @@ -503,7 +517,7 @@ func (kl *Kubelet) setNodeStatus(node *v1.Node) { for i, f := range kl.setNodeStatusFuncs { klog.V(5).Infof("Setting node status at position %v", i) if err := f(node); err != nil { - klog.Warningf("Failed to set some node status fields: %s", err) + klog.Errorf("Failed to set some node status fields: %s", err) } } } @@ -541,9 +555,9 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList), nodestatus.GoRuntime(), ) - if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { - setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits)) - } + // Volume limits + setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits)) + setters = append(setters, nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent), nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_others.go new file mode 100644 index 00000000000..ab1e3647265 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_others.go @@ -0,0 +1,23 @@ +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +func getOSSpecificLabels() (map[string]string, error) { + return nil, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_windows.go similarity index 61% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_windows.go index c4013836566..53d523bcc88 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_windows.go @@ -1,5 +1,7 @@ +// +build windows + /* -Copyright 2015 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,13 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package kubelet import ( - "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/kubelet/winstats" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() +func getOSSpecificLabels() (map[string]string, error) { + osInfo, err := winstats.GetOSInfo() + if err != nil { + return nil, err + } + + return map[string]string{v1.LabelWindowsBuild: osInfo.GetBuild()}, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index 013d0f55aea..bb109753e16 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -128,15 +128,15 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol } // makeMounts determines the mount points for the given container. -func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) { +func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) { // Kubernetes only mounts on /etc/hosts if: // - container is not an infrastructure (pause) container // - container is not already mounting on /etc/hosts // - OS is not Windows // Kubernetes will not mount /etc/hosts if: // - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set. - mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows" - klog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) + mountEtcHostsFile := len(podIPs) > 0 && runtime.GOOS != "windows" + klog.V(3).Infof("container: %v/%v/%v podIPs: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIPs, mountEtcHostsFile) mounts := []kubecontainer.Mount{} var cleanupAction func() for i, mount := range container.VolumeMounts { @@ -258,7 +258,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h } if mountEtcHostsFile { hostAliases := pod.Spec.HostAliases - hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork) + hostsMount, err := makeHostsMount(podDir, podIPs, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork) if err != nil { return nil, cleanupAction, err } @@ -292,10 +292,11 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M } // makeHostsMount makes the mountpoint for the hosts file that the containers -// in a pod are injected with. -func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) { +// in a pod are injected with. podIPs is provided instead of podIP as podIPs +// are present even if dual-stack feature flag is not enabled. 
+func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) { hostsFilePath := path.Join(podDir, "etc-hosts") - if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil { + if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil { return nil, err } return &kubecontainer.Mount{ @@ -309,7 +310,7 @@ func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases // ensureHostsFile ensures that the given host file has an up-to-date ip, host // name, and domain name. -func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error { +func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error { var hostsFileContent []byte var err error @@ -323,7 +324,7 @@ func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string, hostAlia } } else { // if Pod is not using host network, create a managed hosts file with Pod IP and other information. - hostsFileContent = managedHostsFileContent(hostIP, hostName, hostDomainName, hostAliases) + hostsFileContent = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases) } return ioutil.WriteFile(fileName, hostsFileContent, 0644) @@ -342,9 +343,9 @@ func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]b return buffer.Bytes(), nil } -// managedHostsFileContent generates the content of the managed etc hosts based on Pod IP and other +// managedHostsFileContent generates the content of the managed etc hosts based on Pod IPs and other // information. -func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte { +func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte { var buffer bytes.Buffer buffer.WriteString(managedHostsHeader) buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost @@ -354,9 +355,16 @@ func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliase buffer.WriteString("fe00::1\tip6-allnodes\n") buffer.WriteString("fe00::2\tip6-allrouters\n") if len(hostDomainName) > 0 { - buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName)) + // host entry generated for all IPs in podIPs + // podIPs field is populated for clusters even + // dual-stack feature flag is not enabled. + for _, hostIP := range hostIPs { + buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName)) + } } else { - buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName)) + for _, hostIP := range hostIPs { + buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName)) + } } buffer.Write(hostsEntriesFromHostAliases(hostAliases)) return buffer.Bytes() @@ -433,7 +441,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string { // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. 
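managedHostsFileContent now receives every pod IP and writes one hosts entry per IP, so a dual-stack pod resolves its own hostname for both families. A compact sketch of that generation; the header text and helper name are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
)

// managedHostsContent sketches the dual-stack-aware generation above: the
// boilerplate localhost entries plus one line per pod IP.
func managedHostsContent(hostIPs []string, hostName, hostDomain string) []byte {
	var buf bytes.Buffer
	buf.WriteString("# Kubernetes-managed hosts file (sketch).\n")
	buf.WriteString("127.0.0.1\tlocalhost\n")
	buf.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n")
	for _, ip := range hostIPs {
		if hostDomain != "" {
			fmt.Fprintf(&buf, "%s\t%s.%s\t%s\n", ip, hostName, hostDomain, hostName)
		} else {
			fmt.Fprintf(&buf, "%s\t%s\n", ip, hostName)
		}
	}
	return buf.Bytes()
}

func main() {
	out := managedHostsContent(
		[]string{"10.244.1.5", "fd00:1::5"},
		"web-0",
		"default.pod.cluster.local",
	)
	fmt.Print(string(out))
}
```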
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) { +func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { opts, err := kl.containerManager.GetResources(pod, container) if err != nil { return nil, nil, err @@ -459,21 +467,23 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai opts.Devices = append(opts.Devices, blkVolumes...) } - envs, err := kl.makeEnvironmentVariables(pod, container, podIP) + envs, err := kl.makeEnvironmentVariables(pod, container, podIP, podIPs) if err != nil { return nil, nil, err } opts.Envs = append(opts.Envs, envs...) - mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.hostutil, kl.subpather, opts.Envs) + // only podIPs is sent to makeMounts, as podIPs is populated even if dual-stack feature flag is not enabled. + mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs) if err != nil { return nil, cleanupAction, err } opts.Mounts = append(opts.Mounts, mounts...) - // Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and - // Docker for Windows has a bug where only directories can be mounted - if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" { + // adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot + // be mounted as volumes using Docker for Windows. + supportsSingleFileMapping := kl.containerRuntime.SupportsSingleFileMapping() + if len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping { p := kl.getPodContainerDir(pod.UID, container.Name) if err := os.MkdirAll(p, 0750); err != nil { klog.Errorf("Error on creating %q: %v", p, err) @@ -545,7 +555,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[ } // Make the environment variables for a pod in the given namespace. -func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) { +func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) ([]kubecontainer.EnvVar, error) { if pod.Spec.EnableServiceLinks == nil { return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars") } @@ -669,7 +679,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // Step 1b: resolve alternate env var sources switch { case envVar.ValueFrom.FieldRef != nil: - runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP) + runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, podIPs) if err != nil { return result, err } @@ -770,7 +780,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // podFieldSelectorRuntimeValue returns the runtime value of the given // selector for a pod. 
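The field selector resolver introduced by the comment above now also receives the pod's full IP list, so the downward API can expose status.podIPs as a comma-joined string. A sketch of that dispatch with only the IP-related cases:

```go
package main

import (
	"fmt"
	"strings"
)

// fieldValue sketches the switch that resolves downward-API field paths,
// including the new status.podIPs case which joins all pod IPs.
func fieldValue(fieldPath, podIP string, podIPs []string) (string, error) {
	switch fieldPath {
	case "status.podIP":
		return podIP, nil
	case "status.podIPs":
		return strings.Join(podIPs, ","), nil
	default:
		return "", fmt.Errorf("unsupported field path %q in this sketch", fieldPath)
	}
}

func main() {
	v, _ := fieldValue("status.podIPs", "10.244.1.5", []string{"10.244.1.5", "fd00:1::5"})
	fmt.Println(v) // 10.244.1.5,fd00:1::5
}
```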
-func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) { +func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) { internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") if err != nil { return "", err @@ -788,6 +798,8 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod return hostIP.String(), nil case "status.podIP": return podIP, nil + case "status.podIPs": + return strings.Join(podIPs, ","), nil } return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) } @@ -899,7 +911,7 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool { // been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server. func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool { if !notRunning(status.ContainerStatuses) { - // We shouldnt delete pods that still have running containers + // We shouldn't delete pods that still have running containers klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) return false } @@ -918,7 +930,7 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo return false } if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes { - // We shouldnt delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes + // We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD index 0d6581955fb..b9053f08d7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD @@ -43,6 +43,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go index b501b670870..8f023051b17 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" @@ -106,8 +107,9 @@ func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource, // 
DecodeRemoteConfigSource is a helper for using the apimachinery to decode serialized RemoteConfigSources; // e.g. the metadata stored by checkpoint/store/fsstore.go func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) { - // decode the remote config source - _, codecs, err := scheme.NewSchemeAndCodecs() + // Decode the remote config source. We want this to be non-strict + // so we don't error out on newer API keys. + _, codecs, err := scheme.NewSchemeAndCodecs(serializer.DisableStrict) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD index b7ad9dadc79..d32ca1c33bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD @@ -43,6 +43,8 @@ go_test( "//pkg/kubelet/kubeletconfig/util/test:go_default_library", "//pkg/util/filesystem:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go index 13f111a97f7..63aef74f126 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go @@ -45,7 +45,7 @@ type fsLoader struct { // NewFsLoader returns a Loader that loads a KubeletConfiguration from the `kubeletFile` func NewFsLoader(fs utilfs.Filesystem, kubeletFile string) (Loader, error) { - _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs() + _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs(serializer.EnableStrict) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD index c97d1d49b32..62fe4f7dfeb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD @@ -14,9 +14,12 @@ go_library( "//pkg/apis/core/install:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/config/scheme:go_default_library", + "//pkg/kubelet/apis/config/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go index 3801bb16bcb..361bb91faf2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go @@ -19,6 +19,9 @@ package codec import ( "fmt" + "github.com/pkg/errors" + "k8s.io/klog" + // ensure the core apis are installed _ "k8s.io/kubernetes/pkg/apis/core/install" @@ -28,9 +31,10 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme" + kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1" ) -// EncodeKubeletConfig encodes an internal KubeletConfiguration to an external YAML representation +// EncodeKubeletConfig encodes an internal KubeletConfiguration to an external YAML representation. func EncodeKubeletConfig(internal *kubeletconfig.KubeletConfiguration, targetVersion schema.GroupVersion) ([]byte, error) { encoder, err := NewKubeletconfigYAMLEncoder(targetVersion) if err != nil { @@ -44,7 +48,7 @@ func EncodeKubeletConfig(internal *kubeletconfig.KubeletConfiguration, targetVer return data, nil } -// NewKubeletconfigYAMLEncoder returns an encoder that can write objects in the kubeletconfig API group to YAML +// NewKubeletconfigYAMLEncoder returns an encoder that can write objects in the kubeletconfig API group to YAML. func NewKubeletconfigYAMLEncoder(targetVersion schema.GroupVersion) (runtime.Encoder, error) { _, codecs, err := scheme.NewSchemeAndCodecs() if err != nil { @@ -58,7 +62,7 @@ func NewKubeletconfigYAMLEncoder(targetVersion schema.GroupVersion) (runtime.Enc return codecs.EncoderForVersion(info.Serializer, targetVersion), nil } -// NewYAMLEncoder generates a new runtime.Encoder that encodes objects to YAML +// NewYAMLEncoder generates a new runtime.Encoder that encodes objects to YAML. func NewYAMLEncoder(groupName string) (runtime.Encoder, error) { // encode to YAML mediaType := "application/yaml" @@ -72,16 +76,55 @@ func NewYAMLEncoder(groupName string) (runtime.Encoder, error) { return nil, fmt.Errorf("no enabled versions for group %q", groupName) } - // the "best" version supposedly comes first in the list returned from legacyscheme.Registry.EnabledVersionsForGroup + // the "best" version supposedly comes first in the list returned from legacyscheme.Registry.EnabledVersionsForGroup. return legacyscheme.Codecs.EncoderForVersion(info.Serializer, versions[0]), nil } -// DecodeKubeletConfiguration decodes a serialized KubeletConfiguration to the internal type +// newLenientSchemeAndCodecs returns a scheme that has only v1beta1 registered into +// it and a CodecFactory with strict decoding disabled. +func newLenientSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) { + lenientScheme := runtime.NewScheme() + if err := kubeletconfig.AddToScheme(lenientScheme); err != nil { + return nil, nil, fmt.Errorf("failed to add internal kubelet config API to lenient scheme: %v", err) + } + if err := kubeletconfigv1beta1.AddToScheme(lenientScheme); err != nil { + return nil, nil, fmt.Errorf("failed to add kubelet config v1beta1 API to lenient scheme: %v", err) + } + lenientCodecs := serializer.NewCodecFactory(lenientScheme, serializer.DisableStrict) + return lenientScheme, &lenientCodecs, nil +} + +// DecodeKubeletConfiguration decodes a serialized KubeletConfiguration to the internal type. 
func DecodeKubeletConfiguration(kubeletCodecs *serializer.CodecFactory, data []byte) (*kubeletconfig.KubeletConfiguration, error) { - // the UniversalDecoder runs defaulting and returns the internal type by default + var ( + obj runtime.Object + gvk *schema.GroupVersionKind + ) + + // The UniversalDecoder runs defaulting and returns the internal type by default. obj, gvk, err := kubeletCodecs.UniversalDecoder().Decode(data, nil, nil) if err != nil { - return nil, fmt.Errorf("failed to decode, error: %v", err) + // Try strict decoding first. If that fails decode with a lenient + // decoder, which has only v1beta1 registered, and log a warning. + // The lenient path is to be dropped when support for v1beta1 is dropped. + if !runtime.IsStrictDecodingError(err) { + return nil, errors.Wrap(err, "failed to decode") + } + + var lenientErr error + _, lenientCodecs, lenientErr := newLenientSchemeAndCodecs() + if lenientErr != nil { + return nil, lenientErr + } + + obj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil) + if lenientErr != nil { + // Lenient decoding failed with the current version, return the + // original strict error. + return nil, fmt.Errorf("failed lenient decoding: %v", err) + } + // Continue with the v1beta1 object that was decoded leniently, but emit a warning. + klog.Warningf("using lenient decoding as strict decoding failed: %v", err) } internalKC, ok := obj.(*kubeletconfig.KubeletConfiguration) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD index 3813012d891..6738b6edfd9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD @@ -53,6 +53,7 @@ go_library( "//pkg/util/parsers:go_default_library", "//pkg/util/selinux:go_default_library", "//pkg/util/tail:go_default_library", + "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -71,6 +72,9 @@ go_library( "//vendor/google.golang.org/grpc/status:go_default_library", "//vendor/k8s.io/klog:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/qos:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/qos:go_default_library", ], @@ -107,6 +111,7 @@ go_test( "//pkg/kubelet/container/testing:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/prober/results:go_default_library", "//pkg/kubelet/runtimeclass:go_default_library", "//pkg/kubelet/runtimeclass/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 2f2a64f9d45..2552ebe8d3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -78,6 +78,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS cpuCFSQuota: false, cpuCFSQuotaPeriod: metav1.Duration{Duration: time.Microsecond * 
100}, livenessManager: proberesults.NewManager(), + startupManager: proberesults.NewManager(), containerRefManager: kubecontainer.NewRefManager(), machineInfo: machineInfo, osInterface: osInterface, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go index 1d7c4e2c12d..2d970333b3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go @@ -24,10 +24,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog" - "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -250,7 +248,7 @@ func pidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { if pod.Spec.HostPID { return runtimeapi.NamespaceMode_NODE } - if utilfeature.DefaultFeatureGate.Enabled(features.PodShareProcessNamespace) && pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { + if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { return runtimeapi.NamespaceMode_POD } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 81e6809f11d..20a35899f28 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -25,6 +25,7 @@ import ( "net/url" "os" "path/filepath" + goruntime "runtime" "sort" "strings" "sync" @@ -47,6 +48,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/util/selinux" "k8s.io/kubernetes/pkg/util/tail" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) var ( @@ -90,7 +92,7 @@ func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container // * create the container // * start the container // * run the post start lifecycle hooks (if applicable) -func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) { +func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { // Step 1: pull the image. imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig) if err != nil { @@ -113,7 +115,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb restartCount = containerStatus.RestartCount + 1 } - containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef) + containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs) if cleanupAction != nil { defer cleanupAction() } @@ -193,8 +195,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb } // generateContainerConfig generates container config for kubelet runtime v1. 
-func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, func(), error) { - opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) +func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string) (*runtimeapi.ContainerConfig, func(), error) { + opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs) if err != nil { return nil, nil, err } @@ -292,7 +294,9 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO // The reason we create and mount the log file in here (not in kubelet) is because // the file's location depends on the ID of the container, and we need to create and // mount the file before actually starting the container. - if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 { + // we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd. + supportsSingleFileMapping := m.SupportsSingleFileMapping() + if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping { // Because the PodContainerDir contains pod uid and container name which is unique enough, // here we just add a random id to make the path unique for different instances // of the same container. @@ -312,10 +316,13 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO utilruntime.HandleError(fmt.Errorf("unable to set termination-log file permissions %q: %v", containerLogPath, err)) } + // Volume Mounts fail on Windows if it is not of the form C:/ + containerLogPath = volumeutil.MakeAbsolutePath(goruntime.GOOS, containerLogPath) + terminationMessagePath := volumeutil.MakeAbsolutePath(goruntime.GOOS, container.TerminationMessagePath) selinuxRelabel := selinux.SELinuxEnabled() volumeMounts = append(volumeMounts, &runtimeapi.Mount{ HostPath: containerLogPath, - ContainerPath: container.TerminationMessagePath, + ContainerPath: terminationMessagePath, SelinuxRelabel: selinuxRelabel, }) } @@ -355,6 +362,8 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag if len(terminationMessagePath) == 0 { return "", fallbackToLogs } + // Volume Mounts fail on Windows if it is not of the form C:/ + terminationMessagePath = volumeutil.MakeAbsolutePath(goruntime.GOOS, terminationMessagePath) for _, mount := range status.Mounts { if mount.ContainerPath != terminationMessagePath { continue @@ -407,7 +416,9 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n if status.State == runtimeapi.ContainerState_CONTAINER_EXITED { // Populate the termination message if needed. annotatedInfo := getContainerInfoFromAnnotations(status.Annotations) - fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && cStatus.ExitCode != 0 + // If a container cannot even be started, it certainly does not have logs, so no need to fallbackToLogs. 
+ fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && + cStatus.ExitCode != 0 && cStatus.Reason != "ContainerCannotRun" tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs) if checkLogs { // if dockerLegacyService is populated, we're supposed to use it to fetch logs @@ -420,9 +431,12 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n tMessage = m.readLastStringFromContainerLogs(status.GetLogPath()) } } - // Use the termination message written by the application is not empty + // Enrich the termination message written by the application is not empty if len(tMessage) != 0 { - cStatus.Message = tMessage + if len(cStatus.Message) != 0 { + cStatus.Message += ": " + } + cStatus.Message += tMessage } } statuses[i] = cStatus diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 7ca444e4cdf..f3f9dfe3c5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "os" + goruntime "runtime" "time" cadvisorapi "github.com/google/cadvisor/info/v1" @@ -100,6 +101,7 @@ type kubeGenericRuntimeManager struct { // Health check results. livenessManager proberesults.Manager + startupManager proberesults.Manager // If true, enforce container cpu limits with CFS quota support cpuCFSQuota bool @@ -150,6 +152,7 @@ type LegacyLogProvider interface { func NewKubeGenericRuntimeManager( recorder record.EventRecorder, livenessManager proberesults.Manager, + startupManager proberesults.Manager, seccompProfileRoot string, containerRefManager *kubecontainer.RefManager, machineInfo *cadvisorapi.MachineInfo, @@ -175,6 +178,7 @@ func NewKubeGenericRuntimeManager( cpuCFSQuotaPeriod: cpuCFSQuotaPeriod, seccompProfileRoot: seccompProfileRoot, livenessManager: livenessManager, + startupManager: startupManager, containerRefManager: containerRefManager, machineInfo: machineInfo, osInterface: osInterface, @@ -243,6 +247,17 @@ func (m *kubeGenericRuntimeManager) Type() string { return m.runtimeName } +// SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not. +// It is supported on Windows only if the container runtime is containerd. +func (m *kubeGenericRuntimeManager) SupportsSingleFileMapping() bool { + switch goruntime.GOOS { + case "windows": + return m.Type() != types.DockerContainerRuntime + default: + return true + } +} + func newRuntimeVersion(version string) (*utilversion.Version, error) { if ver, err := utilversion.ParseSemantic(version); err == nil { return ver, err @@ -420,7 +435,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one. sandboxStatus := podStatus.SandboxStatuses[0] if readySandboxCount > 1 { - klog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod)) + klog.V(2).Infof("Multiple sandboxes are ready for Pod %q. 
Need to reconcile them", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY { @@ -436,7 +451,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku // Needs to create a new sandbox when the sandbox does not have an IP address. if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network.Ip == "" { - klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } @@ -590,6 +605,9 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku } else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure { // If the container failed the liveness probe, we should kill it. message = fmt.Sprintf("Container %s failed liveness probe", container.Name) + } else if startup, found := m.startupManager.Get(containerStatus.ID); found && startup == proberesults.Failure { + // If the container failed the startup probe, we should kill it. + message = fmt.Sprintf("Container %s failed startup probe", container.Name) } else { // Keep the container. keepCount++ @@ -681,12 +699,13 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine // by container garbage collector. m.pruneInitContainersBeforeStart(pod, podStatus) - // We pass the value of the PRIMARY podIP down to generatePodSandboxConfig and - // generateContainerConfig, which in turn passes it to various other - // functions, in order to facilitate functionality that requires this - // value (hosts file and downward API) and avoid races determining + // We pass the value of the PRIMARY podIP and list of podIPs down to + // generatePodSandboxConfig and generateContainerConfig, which in turn + // passes it to various other functions, in order to facilitate functionality + // that requires this value (hosts file and downward API) and avoid races determining // the pod IP in cases where a container requires restart but the - // podIP isn't in the status manager yet. + // podIP isn't in the status manager yet. The list of podIPs is used to + // generate the hosts file. // // We default to the IPs in the passed-in pod status, and overwrite them if the // sandbox needs to be (re)started. @@ -712,7 +731,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine if referr != nil { klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } - m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err) + m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed to create pod sandbox: %v", err) return } klog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) @@ -772,7 +791,8 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine } klog.V(4).Infof("Creating %v %+v in pod %v", typeName, container, format.Pod(pod)) - if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil { + // NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs. 
+ if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, podIPs); err != nil { startContainerResult.Fail(err, msg) // known errors that are logged in other places are logged at higher levels here to avoid // repetitive log spam diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go index 0d37b123fc5..ac1b3d93513 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go @@ -295,7 +295,7 @@ func isInUse(l string, logs []string) bool { if strings.HasSuffix(l, tmpSuffix) { return false } - // All compresed logs are in use. + // All compressed logs are in use. if strings.HasSuffix(l, compressSuffix) { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD index 176015f2e39..5f3d8f606b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD @@ -17,7 +17,6 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD index d98893388cb..00bc335320f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD @@ -13,7 +13,7 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/server/stats:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -29,8 +29,7 @@ go_test( "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/server/stats/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus/testutil:go_default_library", + "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go index 23194106efa..a793429501a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go @@ -17,14 +17,13 @@ limitations under the License. 
package collectors import ( - "github.com/prometheus/client_golang/prometheus" + "k8s.io/component-base/metrics" "k8s.io/klog" - statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" ) var ( - descLogSize = prometheus.NewDesc( + descLogSize = metrics.NewDesc( "kubelet_container_log_filesystem_used_bytes", "Bytes used by the container's logs on the filesystem.", []string{ @@ -33,28 +32,35 @@ var ( "pod", "container", }, nil, + metrics.ALPHA, + "", ) ) type logMetricsCollector struct { + metrics.BaseStableCollector + podStats func() ([]statsapi.PodStats, error) } -// NewLogMetricsCollector implements the prometheus.Collector interface and +// Check if logMetricsCollector implements necessary interface +var _ metrics.StableCollector = &logMetricsCollector{} + +// NewLogMetricsCollector implements the metrics.StableCollector interface and // exposes metrics about container's log volume size. -func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) prometheus.Collector { +func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) metrics.StableCollector { return &logMetricsCollector{ podStats: podStats, } } -// Describe implements the prometheus.Collector interface. -func (c *logMetricsCollector) Describe(ch chan<- *prometheus.Desc) { +// DescribeWithStability implements the metrics.StableCollector interface. +func (c *logMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) { ch <- descLogSize } -// Collect implements the prometheus.Collector interface. -func (c *logMetricsCollector) Collect(ch chan<- prometheus.Metric) { +// CollectWithStability implements the metrics.StableCollector interface. +func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) { podStats, err := c.podStats() if err != nil { klog.Errorf("failed to get pod stats: %v", err) @@ -64,9 +70,9 @@ func (c *logMetricsCollector) Collect(ch chan<- prometheus.Metric) { for _, ps := range podStats { for _, c := range ps.Containers { if c.Logs != nil && c.Logs.UsedBytes != nil { - ch <- prometheus.MustNewConstMetric( + ch <- metrics.NewLazyConstMetric( descLogSize, - prometheus.GaugeValue, + metrics.GaugeValue, float64(*c.Logs.UsedBytes), ps.PodRef.UID, ps.PodRef.Namespace, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go index 78e047527c1..1c76a7ca1eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go @@ -17,58 +17,68 @@ limitations under the License. 
package collectors import ( - "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog" + "k8s.io/component-base/metrics" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - "k8s.io/kubernetes/pkg/kubelet/metrics" + kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" ) var ( - volumeStatsCapacityBytesDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsCapacityBytesKey), + volumeStatsCapacityBytesDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsCapacityBytesKey), "Capacity in bytes of the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) - volumeStatsAvailableBytesDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsAvailableBytesKey), + volumeStatsAvailableBytesDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsAvailableBytesKey), "Number of available bytes in the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) - volumeStatsUsedBytesDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsUsedBytesKey), + volumeStatsUsedBytesDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsUsedBytesKey), "Number of used bytes in the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) - volumeStatsInodesDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsInodesKey), + volumeStatsInodesDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesKey), "Maximum number of inodes in the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) - volumeStatsInodesFreeDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsInodesFreeKey), + volumeStatsInodesFreeDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesFreeKey), "Number of free inodes in the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) - volumeStatsInodesUsedDesc = prometheus.NewDesc( - prometheus.BuildFQName("", metrics.KubeletSubsystem, metrics.VolumeStatsInodesUsedKey), + volumeStatsInodesUsedDesc = metrics.NewDesc( + metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesUsedKey), "Number of used inodes in the volume", []string{"namespace", "persistentvolumeclaim"}, nil, + metrics.ALPHA, "", ) ) type volumeStatsCollector struct { + metrics.BaseStableCollector + statsProvider serverstats.Provider } -// NewVolumeStatsCollector creates a volume stats prometheus collector. -func NewVolumeStatsCollector(statsProvider serverstats.Provider) prometheus.Collector { +// Check if volumeStatsCollector implements necessary interface +var _ metrics.StableCollector = &volumeStatsCollector{} + +// NewVolumeStatsCollector creates a volume stats metrics.StableCollector. +func NewVolumeStatsCollector(statsProvider serverstats.Provider) metrics.StableCollector { return &volumeStatsCollector{statsProvider: statsProvider} } -// Describe implements the prometheus.Collector interface. 
-func (collector *volumeStatsCollector) Describe(ch chan<- *prometheus.Desc) { +// DescribeWithStability implements the metrics.StableCollector interface. +func (collector *volumeStatsCollector) DescribeWithStability(ch chan<- *metrics.Desc) { ch <- volumeStatsCapacityBytesDesc ch <- volumeStatsAvailableBytesDesc ch <- volumeStatsUsedBytesDesc @@ -77,20 +87,16 @@ func (collector *volumeStatsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- volumeStatsInodesUsedDesc } -// Collect implements the prometheus.Collector interface. -func (collector *volumeStatsCollector) Collect(ch chan<- prometheus.Metric) { +// CollectWithStability implements the metrics.StableCollector interface. +func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) { podStats, err := collector.statsProvider.ListPodStats() if err != nil { return } - addGauge := func(desc *prometheus.Desc, pvcRef *stats.PVCReference, v float64, lv ...string) { + addGauge := func(desc *metrics.Desc, pvcRef *stats.PVCReference, v float64, lv ...string) { lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...) - metric, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v, lv...) - if err != nil { - klog.Warningf("Failed to generate metric: %v", err) - return - } - ch <- metric + + ch <- metrics.NewLazyConstMetric(desc, metrics.GaugeValue, v, lv...) } allPVCs := sets.String{} for _, podStat := range podStats { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index 7dfa7882956..c1c1902cb23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -18,12 +18,12 @@ package metrics import ( "fmt" - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" "sync" "time" - "github.com/prometheus/client_golang/prometheus" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -46,6 +46,7 @@ const ( PLEGRelistIntervalKey = "pleg_relist_interval_seconds" EvictionsKey = "evictions" EvictionStatsAgeKey = "eviction_stats_age_seconds" + PreemptionsKey = "preemptions" DeprecatedPodWorkerLatencyKey = "pod_worker_latency_microseconds" DeprecatedPodStartLatencyKey = "pod_start_latency_microseconds" DeprecatedCgroupManagerOperationsKey = "cgroup_manager_latency_microseconds" @@ -105,7 +106,7 @@ var ( Subsystem: KubeletSubsystem, Name: "containers_per_pod_count", Help: "The number of containers per pod.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -116,7 +117,7 @@ var ( Subsystem: KubeletSubsystem, Name: PodWorkerDurationKey, Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"operation_type"}, @@ -127,7 +128,7 @@ var ( Subsystem: KubeletSubsystem, Name: PodStartDurationKey, Help: "Duration in seconds for a single pod to go from pending to running.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -138,7 +139,7 @@ var ( Subsystem: KubeletSubsystem, Name: CgroupManagerOperationsKey, Help: "Duration in seconds for cgroup manager operations. 
Broken down by method.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"operation_type"}, @@ -149,7 +150,7 @@ var ( Subsystem: KubeletSubsystem, Name: PodWorkerStartDurationKey, Help: "Duration in seconds from seeing a pod to starting a worker.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -160,7 +161,7 @@ var ( Subsystem: KubeletSubsystem, Name: PLEGRelistDurationKey, Help: "Duration in seconds for relisting pods in PLEG.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -182,7 +183,7 @@ var ( Subsystem: KubeletSubsystem, Name: PLEGRelistIntervalKey, Help: "Interval in seconds between relisting in PLEG.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -204,7 +205,7 @@ var ( Subsystem: KubeletSubsystem, Name: RuntimeOperationsDurationKey, Help: "Duration in seconds of runtime operations. Broken down by operation type.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"operation_type"}, @@ -238,11 +239,23 @@ var ( Subsystem: KubeletSubsystem, Name: EvictionStatsAgeKey, Help: "Time between when stats are collected, and when pod is evicted based on those stats by eviction signal", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"eviction_signal"}, ) + // Preemptions is a Counter that tracks the cumulative number of pod preemptions initiated by the kubelet. + // Broken down by preemption signal. A preemption is only recorded for one resource, the sum of all signals + // is the number of preemptions on the given node. + Preemptions = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: PreemptionsKey, + Help: "Cumulative number of pod preemptions by preemption resource", + StabilityLevel: metrics.ALPHA, + }, + []string{"preemption_signal"}, + ) // DevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations. // Broken down by resource name. DevicePluginRegistrationCount = metrics.NewCounterVec( @@ -261,7 +274,7 @@ var ( Subsystem: KubeletSubsystem, Name: DevicePluginAllocationDurationKey, Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.", - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"resource_name"}, @@ -445,7 +458,7 @@ var ( Name: RunPodSandboxDurationKey, Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.", // Use DefBuckets for now, will customize the buckets if necessary. - Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"runtime_handler"}, @@ -486,7 +499,7 @@ var ( var registerMetrics sync.Once // Register registers all metrics. -func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheus.Collector) { +func Register(containerCache kubecontainer.RuntimeCache, collectors ...metrics.StableCollector) { // Register the metrics. 
registerMetrics.Do(func() { legacyregistry.MustRegister(NodeName) @@ -503,6 +516,7 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu legacyregistry.MustRegister(RuntimeOperationsErrors) legacyregistry.MustRegister(Evictions) legacyregistry.MustRegister(EvictionStatsAge) + legacyregistry.MustRegister(Preemptions) legacyregistry.MustRegister(DevicePluginRegistrationCount) legacyregistry.MustRegister(DevicePluginAllocationDuration) legacyregistry.MustRegister(DeprecatedPodWorkerLatency) @@ -526,11 +540,16 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu legacyregistry.MustRegister(ConfigError) } for _, collector := range collectors { - legacyregistry.RawMustRegister(collector) + legacyregistry.CustomMustRegister(collector) } }) } +// GetGather returns the gatherer. It used by test case outside current package. +func GetGather() metrics.Gatherer { + return legacyregistry.DefaultGatherer +} + // SinceInMicroseconds gets the time since the specified start in microseconds. func SinceInMicroseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go deleted file mode 100644 index 2adb3e9bfbb..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mountpod - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - - "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/kubelet/config" - kubepod "k8s.io/kubernetes/pkg/kubelet/pod" - utilstrings "k8s.io/utils/strings" -) - -// Manager is an interface that tracks pods with mount utilities for individual -// volume plugins. -type Manager interface { - GetMountPod(pluginName string) (pod *v1.Pod, container string, err error) -} - -// basicManager is simple implementation of Manager. Pods with mount utilities -// are registered by placing a JSON file into -// /var/lib/kubelet/plugin-containers/.json and this manager just -// finds them there. -type basicManager struct { - registrationDirectory string - podManager kubepod.Manager -} - -// volumePluginRegistration specified format of the json files placed in -// /var/lib/kubelet/plugin-containers/ -type volumePluginRegistration struct { - PodName string `json:"podName"` - PodNamespace string `json:"podNamespace"` - PodUID string `json:"podUID"` - ContainerName string `json:"containerName"` -} - -// NewManager returns a new mount pod manager. 
-func NewManager(rootDirectory string, podManager kubepod.Manager) (Manager, error) { - regPath := path.Join(rootDirectory, config.DefaultKubeletPluginContainersDirName) - - // Create the directory on startup - os.MkdirAll(regPath, 0700) - - return &basicManager{ - registrationDirectory: regPath, - podManager: podManager, - }, nil -} - -func (m *basicManager) getVolumePluginRegistrationPath(pluginName string) string { - // sanitize plugin name so it does not escape directory - safePluginName := utilstrings.EscapeQualifiedName(pluginName) + ".json" - return path.Join(m.registrationDirectory, safePluginName) -} - -func (m *basicManager) GetMountPod(pluginName string) (pod *v1.Pod, containerName string, err error) { - // Read /var/lib/kubelet/plugin-containers/.json - regPath := m.getVolumePluginRegistrationPath(pluginName) - regBytes, err := ioutil.ReadFile(regPath) - if err != nil { - if os.IsNotExist(err) { - // No pod is registered for this plugin - return nil, "", nil - } - return nil, "", fmt.Errorf("cannot read %s: %v", regPath, err) - } - - // Parse json - var reg volumePluginRegistration - if err := json.Unmarshal(regBytes, ®); err != nil { - return nil, "", fmt.Errorf("unable to parse %s: %s", regPath, err) - } - if len(reg.ContainerName) == 0 { - return nil, "", fmt.Errorf("unable to parse %s: \"containerName\" is not set", regPath) - } - if len(reg.PodUID) == 0 { - return nil, "", fmt.Errorf("unable to parse %s: \"podUID\" is not set", regPath) - } - if len(reg.PodNamespace) == 0 { - return nil, "", fmt.Errorf("unable to parse %s: \"podNamespace\" is not set", regPath) - } - if len(reg.PodName) == 0 { - return nil, "", fmt.Errorf("unable to parse %s: \"podName\" is not set", regPath) - } - - pod, ok := m.podManager.GetPodByName(reg.PodNamespace, reg.PodName) - if !ok { - return nil, "", fmt.Errorf("unable to process %s: pod %s/%s not found", regPath, reg.PodNamespace, reg.PodName) - } - if string(pod.UID) != reg.PodUID { - return nil, "", fmt.Errorf("unable to process %s: pod %s/%s has unexpected UID", regPath, reg.PodNamespace, reg.PodName) - } - // make sure that reg.ContainerName exists in the pod - for i := range pod.Spec.Containers { - if pod.Spec.Containers[i].Name == reg.ContainerName { - return pod, reg.ContainerName, nil - } - } - return nil, "", fmt.Errorf("unable to process %s: pod %s/%s has no container named %q", regPath, reg.PodNamespace, reg.PodName, reg.ContainerName) - -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go index b329b848700..f09b6f111d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go @@ -230,7 +230,11 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, } } if fields[0] == "search" { - searches = fields[1:] + // Normalise search fields so the same domain with and without trailing dot will only count once, to avoid hitting search validation limits. 
+ searches = []string{} + for _, s := range fields[1:] { + searches = append(searches, strings.TrimSuffix(s, ".")) + } } if fields[0] == "options" { options = fields[1:] diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go index 790974c6132..86069fe9d4d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go @@ -108,7 +108,7 @@ func (c *controller) sync() { lease, created := c.backoffEnsureLease() c.latestLease = lease // we don't need to update the lease if we just created it - if !created { + if !created && lease != nil { if err := c.retryUpdateLease(lease); err != nil { klog.Errorf("%v, will retry after %v", err, c.renewInterval) } @@ -144,8 +144,15 @@ func (c *controller) backoffEnsureLease() (*coordinationv1.Lease, bool) { func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { lease, err := c.leaseClient.Get(c.holderIdentity, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - // lease does not exist, create it - lease, err := c.leaseClient.Create(c.newLease(nil)) + // lease does not exist, create it. + leaseToCreate := c.newLease(nil) + if len(leaseToCreate.OwnerReferences) == 0 { + // We want to ensure that a lease will always have OwnerReferences set. + // Thus, given that we weren't able to set it correctly, we simply + // not create it this time - we will retry in the next iteration. + return nil, false, nil + } + lease, err := c.leaseClient.Create(leaseToCreate) if err != nil { return nil, false, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD index 047b2ab087c..fcf5f4e9c30 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD @@ -13,7 +13,6 @@ go_library( "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/events:go_default_library", - "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -22,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -51,7 +51,6 @@ go_test( "//pkg/kubelet/container/testing:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", - "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -63,6 +62,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cloud-provider/fake:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", 
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go index 8ed1cb465b0..a5db1e7ca62 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go @@ -33,6 +33,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" + "k8s.io/component-base/version" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -40,7 +41,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" - "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume" "k8s.io/klog" @@ -376,8 +376,6 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // return func(node *v1.Node) error { verinfo, err := versionInfoFunc() if err != nil { - // TODO(mtaufen): consider removing this log line, since returned error will be logged - klog.Errorf("Error getting version info: %v", err) return fmt.Errorf("error getting version info: %v", err) } @@ -416,8 +414,6 @@ func Images(nodeStatusMaxImages int32, var imagesOnNode []v1.ContainerImage containerImages, err := imageListFunc() if err != nil { - // TODO(mtaufen): consider removing this log line, since returned error will be logged - klog.Errorf("Error getting image list: %v", err) node.Status.Images = imagesOnNode return fmt.Errorf("error getting image list: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom/BUILD index 0e852f1fce7..f1ca684af36 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom/BUILD @@ -13,7 +13,11 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/github.com/google/cadvisor/utils/oomparser:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//staging/src/k8s.io/client-go/tools/record:go_default_library", @@ -24,6 +28,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//staging/src/k8s.io/client-go/tools/record:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", @@ -58,6 +65,11 @@ go_test( srcs = ["oom_watcher_linux_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + 
"//vendor/github.com/stretchr/testify/assert:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD index c6bf885ce8a..1f3a3be0fdd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD @@ -37,7 +37,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/BUILD index e4bab30cc03..cc7187c871d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/BUILD @@ -44,12 +44,12 @@ go_test( srcs = ["plugin_manager_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/pluginmanager/pluginwatcher:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go index b039cfdf0fe..904e8015a46 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go @@ -77,9 +77,8 @@ var _ ActualStateOfWorld = &actualStateOfWorld{} // PluginInfo holds information of a plugin type PluginInfo struct { - SocketPath string - FoundInDeprecatedDir bool - Timestamp time.Time + SocketPath string + Timestamp time.Time } func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go index 1b601ba65e4..729904e1c47 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go @@ -34,9 +34,9 @@ import ( // all plugins attached to this node. type DesiredStateOfWorld interface { // AddOrUpdatePlugin add the given plugin in the cache if it doesn't already exist. 
- // If it does exist in the cache, then the timestamp and foundInDeprecatedDir of the PluginInfo object in the cache will be updated. + // If it does exist in the cache, then the timestamp of the PluginInfo object in the cache will be updated. // An error will be returned if socketPath is empty. - AddOrUpdatePlugin(socketPath string, foundInDeprecatedDir bool) error + AddOrUpdatePlugin(socketPath string) error // RemovePlugin deletes the plugin with the given socket path from the desired // state of world. @@ -120,7 +120,7 @@ func errSuffix(err error) string { return errStr } -func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string, foundInDeprecatedDir bool) error { +func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string) error { dsw.Lock() defer dsw.Unlock() @@ -131,14 +131,13 @@ func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string, foundInDepr klog.V(2).Infof("Plugin (Path %s) exists in actual state cache, timestamp will be updated", socketPath) } - // Update the the PluginInfo object. + // Update the PluginInfo object. // Note that we only update the timestamp in the desired state of world, not the actual state of world // because in the reconciler, we need to check if the plugin in the actual state of world is the same // version as the plugin in the desired state of world dsw.socketFileToInfo[socketPath] = PluginInfo{ - SocketPath: socketPath, - FoundInDeprecatedDir: foundInDeprecatedDir, - Timestamp: time.Now(), + SocketPath: socketPath, + Timestamp: time.Now(), } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go index d2c263bdab6..a0dbb3696a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go @@ -47,7 +47,7 @@ package cache type PluginHandler interface { // Validate returns an error if the information provided by // the potential plugin is erroneous (unsupported version, ...) - ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error + ValidatePlugin(pluginName string, endpoint string, versions []string) error // RegisterPlugin is called so that the plugin can be register by any // plugin consumer // Error encountered here can still be Notified to the plugin. 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/BUILD index a18ac2f69df..d3e803bbafd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/BUILD @@ -7,8 +7,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/pluginmanager/cache:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/klog:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/metrics.go index a6f357be98b..aabc1f7b146 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics/metrics.go @@ -19,8 +19,8 @@ package metrics import ( "sync" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" ) @@ -32,11 +32,13 @@ const ( var ( registerMetrics sync.Once - totalPluginsDesc = prometheus.NewDesc( + totalPluginsDesc = metrics.NewDesc( pluginManagerTotalPlugins, "Number of plugins in Plugin Manager", []string{"socket_path", "state"}, nil, + metrics.ALPHA, + "", ) ) @@ -55,35 +57,33 @@ func (pc pluginCount) add(state, pluginName string) { // Register registers Plugin Manager metrics. func Register(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld) { registerMetrics.Do(func() { - prometheus.MustRegister(&totalPluginsCollector{asw, dsw}) + legacyregistry.CustomMustRegister(&totalPluginsCollector{asw: asw, dsw: dsw}) }) } type totalPluginsCollector struct { + metrics.BaseStableCollector + asw cache.ActualStateOfWorld dsw cache.DesiredStateOfWorld } -var _ prometheus.Collector = &totalPluginsCollector{} +var _ metrics.StableCollector = &totalPluginsCollector{} -// Describe implements the prometheus.Collector interface. -func (c *totalPluginsCollector) Describe(ch chan<- *prometheus.Desc) { +// DescribeWithStability implements the metrics.StableCollector interface. +func (c *totalPluginsCollector) DescribeWithStability(ch chan<- *metrics.Desc) { ch <- totalPluginsDesc } -// Collect implements the prometheus.Collector interface. -func (c *totalPluginsCollector) Collect(ch chan<- prometheus.Metric) { +// CollectWithStability implements the metrics.StableCollector interface. 
+func (c *totalPluginsCollector) CollectWithStability(ch chan<- metrics.Metric) { for stateName, pluginCount := range c.getPluginCount() { for socketPath, count := range pluginCount { - metric, err := prometheus.NewConstMetric(totalPluginsDesc, - prometheus.GaugeValue, + ch <- metrics.NewLazyConstMetric(totalPluginsDesc, + metrics.GaugeValue, float64(count), socketPath, stateName) - if err != nil { - klog.Warningf("Failed to create metric : %v", err) - } - ch <- metric } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/BUILD index baa361d4857..859bde70415 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/BUILD @@ -9,12 +9,13 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor", visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/pluginmanager/cache:go_default_library", "//pkg/util/goroutinemap:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go index 667d8e907b5..12ae38ee404 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go @@ -45,7 +45,7 @@ import ( type OperationExecutor interface { // RegisterPlugin registers the given plugin using the a handler in the plugin handler map. // It then updates the actual state of the world to reflect that. - RegisterPlugin(socketPath string, foundInDeprecatedDir bool, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error + RegisterPlugin(socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error // UnregisterPlugin deregisters the given plugin using a handler in the given plugin handler map. // It then updates the actual state of the world to reflect that. @@ -63,7 +63,7 @@ func NewOperationExecutor( } // ActualStateOfWorldUpdater defines a set of operations updating the actual -// state of the world cache after successful registeration/deregistration. +// state of the world cache after successful registration/deregistration. type ActualStateOfWorldUpdater interface { // AddPlugin add the given plugin in the cache if no existing plugin // in the cache has the same socket path. 
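The pluginmanager metrics diff above replaces the raw Prometheus collector with component-base's stable-collector machinery: metrics.NewDesc now carries a stability level, the collector embeds metrics.BaseStableCollector and implements DescribeWithStability/CollectWithStability, NewLazyConstMetric replaces the error-returning prometheus.NewConstMetric, and registration goes through legacyregistry.CustomMustRegister. A minimal sketch of the same pattern follows; it uses only the calls visible in the hunk, while the metric name, label, and data source are invented for illustration.

package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// exampleDesc mirrors the totalPluginsDesc construction above: name, help,
// variable labels, const labels, stability level, deprecated version.
var exampleDesc = metrics.NewDesc(
	"example_widget_count",
	"Number of widgets, by state",
	[]string{"state"},
	nil,
	metrics.ALPHA,
	"",
)

// widgetCollector embeds BaseStableCollector, which provides the remaining
// plumbing the registry expects from a custom collector.
type widgetCollector struct {
	metrics.BaseStableCollector

	counts func() map[string]int // data source, injected for the sketch
}

var _ metrics.StableCollector = &widgetCollector{}

// DescribeWithStability implements metrics.StableCollector.
func (c *widgetCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
	ch <- exampleDesc
}

// CollectWithStability implements metrics.StableCollector; NewLazyConstMetric
// cannot fail, so the old error-handling branch disappears.
func (c *widgetCollector) CollectWithStability(ch chan<- metrics.Metric) {
	for state, n := range c.counts() {
		ch <- metrics.NewLazyConstMetric(exampleDesc, metrics.GaugeValue, float64(n), state)
	}
}

func main() {
	// Register once, as Register() does above; legacyregistry.Handler(), used
	// elsewhere in this patch, would then expose the metric over HTTP.
	legacyregistry.CustomMustRegister(&widgetCollector{
		counts: func() map[string]int { return map[string]int{"actual_state_of_world": 2, "desired_state_of_world": 3} },
	})
}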
@@ -94,12 +94,11 @@ func (oe *operationExecutor) IsOperationPending(socketPath string) bool { func (oe *operationExecutor) RegisterPlugin( socketPath string, - foundInDeprecatedDir bool, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error { generatedOperation := - oe.operationGenerator.GenerateRegisterPluginFunc(socketPath, foundInDeprecatedDir, timestamp, pluginHandlers, actualStateOfWorld) + oe.operationGenerator.GenerateRegisterPluginFunc(socketPath, timestamp, pluginHandlers, actualStateOfWorld) return oe.pendingOperations.Run( socketPath, generatedOperation) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go index 1089f14c65b..371709a09c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go @@ -26,10 +26,12 @@ import ( "net" "time" + "k8s.io/klog" + "github.com/pkg/errors" "google.golang.org/grpc" "k8s.io/client-go/tools/record" - registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" + registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" ) @@ -59,7 +61,6 @@ type OperationGenerator interface { // Generates the RegisterPlugin function needed to perform the registration of a plugin GenerateRegisterPluginFunc( socketPath string, - foundInDeprecatedDir bool, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error @@ -73,7 +74,6 @@ type OperationGenerator interface { func (og *operationGenerator) GenerateRegisterPluginFunc( socketPath string, - foundInDeprecatedDir bool, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { @@ -104,7 +104,7 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( if infoResp.Endpoint == "" { infoResp.Endpoint = socketPath } - if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions, foundInDeprecatedDir); err != nil { + if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { if err = og.notifyPlugin(client, false, fmt.Sprintf("RegisterPlugin error -- plugin validation failed with err: %v", err)); err != nil { return fmt.Errorf("RegisterPlugin error -- failed to send error at socket %s, err: %v", socketPath, err) } @@ -112,11 +112,13 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( } // We add the plugin to the actual state of world cache before calling a plugin consumer's Register handle // so that if we receive a delete event during Register Plugin, we can process it as a DeRegister call. 
- actualStateOfWorldUpdater.AddPlugin(cache.PluginInfo{ - SocketPath: socketPath, - FoundInDeprecatedDir: foundInDeprecatedDir, - Timestamp: timestamp, + err = actualStateOfWorldUpdater.AddPlugin(cache.PluginInfo{ + SocketPath: socketPath, + Timestamp: timestamp, }) + if err != nil { + klog.Errorf("RegisterPlugin error -- failed to add plugin at socket %s, err: %v", socketPath, err) + } if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { return og.notifyPlugin(client, false, fmt.Sprintf("RegisterPlugin error -- plugin registration failed with err: %v", err)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/plugin_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/plugin_manager.go index 42e786e76b9..3b0ad82040d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/plugin_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/plugin_manager.go @@ -53,7 +53,6 @@ const ( // PluginManager interface. func NewPluginManager( sockDir string, - deprecatedSockDir string, recorder record.EventRecorder) PluginManager { asw := cache.NewActualStateOfWorld() dsw := cache.NewDesiredStateOfWorld() @@ -71,7 +70,6 @@ func NewPluginManager( pm := &pluginManager{ desiredStateOfWorldPopulator: pluginwatcher.NewWatcher( sockDir, - deprecatedSockDir, dsw, ), reconciler: reconciler, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD index ace8b3702c1..06f911a5bc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD @@ -10,12 +10,12 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher", visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/pluginmanager/cache:go_default_library", "//pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1:go_default_library", "//pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2:go_default_library", "//pkg/kubelet/util:go_default_library", "//pkg/util/filesystem:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", @@ -28,9 +28,9 @@ go_test( srcs = ["plugin_watcher_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/pluginmanager/cache:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go index a74f1b1c784..50ad81fff33 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc" "k8s.io/klog" - registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" + registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" v1beta1 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1" v1beta2 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2" ) @@ -50,7 +50,6 @@ const ( exampleEventValidate examplePluginEvent = 0 exampleEventRegister examplePluginEvent = 1 exampleEventDeRegister examplePluginEvent = 2 - exampleEventError examplePluginEvent = 3 ) // NewExampleHandler provide a example handler @@ -64,11 +63,7 @@ func NewExampleHandler(supportedVersions []string, permitDeprecatedDir bool) *ex } } -func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { - if foundInDeprecatedDir && !p.permitDeprecatedDir { - return fmt.Errorf("device plugin socket was found in a directory that is no longer supported and this test does not permit plugins from deprecated dir") - } - +func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { p.SendEvent(pluginName, exampleEventValidate) n, ok := p.DecreasePluginCount(pluginName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go index be127d77b60..ca2e4342272 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc" "k8s.io/klog" - registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" + registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" v1beta1 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1" v1beta2 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2" @@ -88,10 +88,9 @@ func NewTestExamplePlugin(pluginName string, pluginType string, endpoint string, } // GetPluginInfo returns a PluginInfo object -func GetPluginInfo(plugin *examplePlugin, foundInDeprecatedDir bool) cache.PluginInfo { +func GetPluginInfo(plugin *examplePlugin) cache.PluginInfo { return cache.PluginInfo{ - SocketPath: plugin.endpoint, - FoundInDeprecatedDir: foundInDeprecatedDir, + SocketPath: plugin.endpoint, } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go index c5d1d5ec977..d6ba3687f25 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go @@ -19,7 +19,6 @@ package pluginwatcher import ( "fmt" "os" - "path/filepath" "runtime" "strings" "time" @@ -35,20 +34,16 @@ import ( // Watcher is the plugin watcher type Watcher struct { path string - deprecatedPath string fs utilfs.Filesystem fsWatcher *fsnotify.Watcher stopped chan struct{} desiredStateOfWorld 
cache.DesiredStateOfWorld } -// NewWatcher provides a new watcher -// deprecatedSockDir refers to a pre-GA directory that was used by older plugins -// for socket registration. New plugins should not use this directory. -func NewWatcher(sockDir string, deprecatedSockDir string, desiredStateOfWorld cache.DesiredStateOfWorld) *Watcher { +// NewWatcher provides a new watcher for socket registration +func NewWatcher(sockDir string, desiredStateOfWorld cache.DesiredStateOfWorld) *Watcher { return &Watcher{ path: sockDir, - deprecatedPath: deprecatedSockDir, fs: &utilfs.DefaultFs{}, desiredStateOfWorld: desiredStateOfWorld, } @@ -77,13 +72,6 @@ func (w *Watcher) Start(stopCh <-chan struct{}) error { klog.Errorf("failed to traverse plugin socket path %q, err: %v", w.path, err) } - // Traverse deprecated plugin dir, if specified. - if len(w.deprecatedPath) != 0 { - if err := w.traversePluginDir(w.deprecatedPath); err != nil { - klog.Errorf("failed to traverse deprecated plugin socket path %q, err: %v", w.deprecatedPath, err) - } - } - go func(fsWatcher *fsnotify.Watcher) { defer close(w.stopped) for { @@ -147,10 +135,6 @@ func (w *Watcher) traversePluginDir(dir string) error { switch mode := info.Mode(); { case mode.IsDir(): - if w.containsBlacklistedDir(path) { - return filepath.SkipDir - } - if err := w.fsWatcher.Add(path); err != nil { return fmt.Errorf("failed to watch %s, err: %v", path, err) } @@ -177,10 +161,6 @@ func (w *Watcher) traversePluginDir(dir string) error { func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { klog.V(6).Infof("Handling create event: %v", event) - if w.containsBlacklistedDir(event.Name) { - return nil - } - fi, err := os.Stat(event.Name) if err != nil { return fmt.Errorf("stat file %s failed: %v", event.Name, err) @@ -218,7 +198,7 @@ func (w *Watcher) handlePluginRegistration(socketPath string) error { // removed from the desired world cache, so we still need to call AddOrUpdatePlugin // in this case to update the timestamp klog.V(2).Infof("Adding socket path or updating timestamp %s to desired state cache", socketPath) - err := w.desiredStateOfWorld.AddOrUpdatePlugin(socketPath, w.foundInDeprecatedDir(socketPath)) + err := w.desiredStateOfWorld.AddOrUpdatePlugin(socketPath) if err != nil { return fmt.Errorf("error adding socket path %s or updating timestamp to desired state cache: %v", socketPath, err) } @@ -232,27 +212,3 @@ func (w *Watcher) handleDeleteEvent(event fsnotify.Event) { klog.V(2).Infof("Removing socket path %s from desired state cache", socketPath) w.desiredStateOfWorld.RemovePlugin(socketPath) } - -// While deprecated dir is supported, to add extra protection around #69015 -// we will explicitly blacklist kubernetes.io directory. 
-func (w *Watcher) containsBlacklistedDir(path string) bool { - return strings.HasPrefix(path, w.deprecatedPath+"/kubernetes.io/") || - path == w.deprecatedPath+"/kubernetes.io" -} - -func (w *Watcher) foundInDeprecatedDir(socketPath string) bool { - if len(w.deprecatedPath) != 0 { - if socketPath == w.deprecatedPath { - return true - } - - deprecatedPath := w.deprecatedPath - if !strings.HasSuffix(deprecatedPath, "/") { - deprecatedPath = deprecatedPath + "/" - } - if strings.HasPrefix(socketPath, deprecatedPath) { - return true - } - } - return false -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/BUILD index 8d28162555a..969fe326640 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/BUILD @@ -20,12 +20,12 @@ go_test( srcs = ["reconciler_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/pluginmanager/cache:go_default_library", "//pkg/kubelet/pluginmanager/operationexecutor:go_default_library", "//pkg/kubelet/pluginmanager/pluginwatcher:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go index 10afecf8e7b..7f6790d5c16 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go @@ -145,7 +145,7 @@ func (rc *reconciler) reconcile() { for _, pluginToRegister := range rc.desiredStateOfWorld.GetPluginsToRegister() { if !rc.actualStateOfWorld.PluginExistsWithCorrectTimestamp(pluginToRegister) { klog.V(5).Infof(pluginToRegister.GenerateMsgDetailed("Starting operationExecutor.RegisterPlugin", "")) - err := rc.operationExecutor.RegisterPlugin(pluginToRegister.SocketPath, pluginToRegister.FoundInDeprecatedDir, pluginToRegister.Timestamp, rc.getHandlers(), rc.actualStateOfWorld) + err := rc.operationExecutor.RegisterPlugin(pluginToRegister.SocketPath, pluginToRegister.Timestamp, rc.getHandlers(), rc.actualStateOfWorld) if err != nil && !goroutinemap.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD index 8d4811ab459..49a0baa8fc3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD @@ -45,6 +45,10 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + 
"//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go index 5078bc808db..fe7cd28c255 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go @@ -17,8 +17,10 @@ limitations under the License. package pod import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + "fmt" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" @@ -39,16 +41,28 @@ type MirrorClient interface { DeleteMirrorPod(podFullName string, uid *types.UID) (bool, error) } +// nodeGetter is a subset a NodeLister, simplified for testing. +type nodeGetter interface { + // Get retrieves the Node for a given name. + Get(name string) (*v1.Node, error) +} + // basicMirrorClient is a functional MirrorClient. Mirror pods are stored in // the kubelet directly because they need to be in sync with the internal // pods. type basicMirrorClient struct { apiserverClient clientset.Interface + nodeGetter nodeGetter + nodeName string } // NewBasicMirrorClient returns a new MirrorClient. -func NewBasicMirrorClient(apiserverClient clientset.Interface) MirrorClient { - return &basicMirrorClient{apiserverClient: apiserverClient} +func NewBasicMirrorClient(apiserverClient clientset.Interface, nodeName string, nodeGetter nodeGetter) MirrorClient { + return &basicMirrorClient{ + apiserverClient: apiserverClient, + nodeName: nodeName, + nodeGetter: nodeGetter, + } } func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { @@ -64,8 +78,25 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { } hash := getPodHash(pod) copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash + + // With the MirrorPodNodeRestriction feature, mirror pods are required to have an owner reference + // to the owning node. + // See http://git.k8s.io/enhancements/keps/sig-auth/20190916-noderestriction-pods.md + nodeUID, err := mc.getNodeUID() + if err != nil { + return fmt.Errorf("failed to get node UID: %v", err) + } + controller := true + copyPod.OwnerReferences = []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Node", + Name: mc.nodeName, + UID: nodeUID, + Controller: &controller, + }} + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(©Pod) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. 
if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { return nil @@ -94,7 +125,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID) var GracePeriodSeconds int64 if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { // Unfortunately, there's no generic error for failing a precondition - if !(errors.IsNotFound(err) || errors.IsConflict(err)) { + if !(apierrors.IsNotFound(err) || apierrors.IsConflict(err)) { // We should return the error here, but historically this routine does // not return an error unless it can't parse the pod name klog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) @@ -104,18 +135,23 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID) return true, nil } +func (mc *basicMirrorClient) getNodeUID() (types.UID, error) { + node, err := mc.nodeGetter.Get(mc.nodeName) + if err != nil { + return "", err + } + if node.UID == "" { + return "", fmt.Errorf("UID unset for node %s", mc.nodeName) + } + return node.UID, nil +} + // IsStaticPod returns true if the passed Pod is static. func IsStaticPod(pod *v1.Pod) bool { source, err := kubetypes.GetPodSource(pod) return err == nil && source != kubetypes.ApiserverSource } -// IsMirrorPod returns true if the passed Pod is a Mirror Pod. -func IsMirrorPod(pod *v1.Pod) bool { - _, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] - return ok -} - func getHashFromMirrorPod(pod *v1.Pod) (string, bool) { hash, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] return hash, ok diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go index 7e3ef5edd4a..98f8f00f74f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go @@ -59,7 +59,7 @@ type Manager interface { // whether the pod is found. GetPodByUID(types.UID) (*v1.Pod, bool) // GetPodByMirrorPod returns the static pod for the given mirror pod and - // whether it was known to the pod manger. + // whether it was known to the pod manager. GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool) // GetMirrorPodByPod returns the mirror pod for the given static pod and // whether it was known to the pod manager. @@ -206,7 +206,7 @@ func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) { podFullName := kubecontainer.GetPodFullName(pod) // This logic relies on a static pod and its mirror to have the same name. // It is safe to type convert here due to the IsMirrorPod guard. - if IsMirrorPod(pod) { + if kubetypes.IsMirrorPod(pod) { mirrorPodUID := kubetypes.MirrorPodUID(pod.UID) pm.mirrorPodByUID[mirrorPodUID] = pod pm.mirrorPodByFullName[podFullName] = pod @@ -235,7 +235,7 @@ func (pm *basicManager) DeletePod(pod *v1.Pod) { } podFullName := kubecontainer.GetPodFullName(pod) // It is safe to type convert here due to the IsMirrorPod guard. 
- if IsMirrorPod(pod) { + if kubetypes.IsMirrorPod(pod) { mirrorPodUID := kubetypes.MirrorPodUID(pod.UID) delete(pm.mirrorPodByUID, mirrorPodUID) delete(pm.mirrorPodByFullName, podFullName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD index 7869ab83683..9be67a0ff67 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/eviction:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", + "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go index 86e3333a68c..81eeff357bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" @@ -111,6 +112,11 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, // In future syncPod loops, the kubelet will retry the pod deletion steps that it was stuck on. 
continue } + if len(insufficientResources) > 0 { + metrics.Preemptions.WithLabelValues(insufficientResources[0].resourceName.String()).Inc() + } else { + metrics.Preemptions.WithLabelValues("").Inc() + } klog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD index 90ccc7d0bbe..516e70996b5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD @@ -35,7 +35,6 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], @@ -58,6 +57,7 @@ go_test( "//pkg/kubelet/prober/results:go_default_library", "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/status/testing:go_default_library", + "//pkg/kubelet/util/ioutils:go_default_library", "//pkg/probe:go_default_library", "//pkg/probe/exec:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index 173a23e77f2..dd94f1705b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -252,63 +252,68 @@ func formatURL(scheme string, host string, port int, path string) *url.URL { type execInContainer struct { // run executes a command in a container. Combined stdout and stderr output is always returned. An // error is returned if one occurred. 
- run func() ([]byte, error) + run func() ([]byte, error) + writer io.Writer } func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { - return execInContainer{func() ([]byte, error) { + return &execInContainer{run: func() ([]byte, error) { return pb.runner.RunInContainer(containerID, cmd, timeout) }} } -func (eic execInContainer) Run() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Run() error { + return nil } -func (eic execInContainer) CombinedOutput() ([]byte, error) { +func (eic *execInContainer) CombinedOutput() ([]byte, error) { return eic.run() } -func (eic execInContainer) Output() ([]byte, error) { +func (eic *execInContainer) Output() ([]byte, error) { return nil, fmt.Errorf("unimplemented") } -func (eic execInContainer) SetDir(dir string) { +func (eic *execInContainer) SetDir(dir string) { //unimplemented } -func (eic execInContainer) SetStdin(in io.Reader) { +func (eic *execInContainer) SetStdin(in io.Reader) { //unimplemented } -func (eic execInContainer) SetStdout(out io.Writer) { - //unimplemented +func (eic *execInContainer) SetStdout(out io.Writer) { + eic.writer = out } -func (eic execInContainer) SetStderr(out io.Writer) { - //unimplemented +func (eic *execInContainer) SetStderr(out io.Writer) { + eic.writer = out } -func (eic execInContainer) SetEnv(env []string) { +func (eic *execInContainer) SetEnv(env []string) { //unimplemented } -func (eic execInContainer) Stop() { +func (eic *execInContainer) Stop() { //unimplemented } -func (eic execInContainer) Start() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Start() error { + data, err := eic.run() + if eic.writer != nil { + eic.writer.Write(data) + } + return err } -func (eic execInContainer) Wait() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Wait() error { + return nil } -func (eic execInContainer) StdoutPipe() (io.ReadCloser, error) { +func (eic *execInContainer) StdoutPipe() (io.ReadCloser, error) { return nil, fmt.Errorf("unimplemented") } -func (eic execInContainer) StderrPipe() (io.ReadCloser, error) { +func (eic *execInContainer) StderrPipe() (io.ReadCloser, error) { return nil, fmt.Errorf("unimplemented") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go index d3eccadfe9e..f0ca8b94192 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go @@ -102,13 +102,13 @@ type manager struct { func NewManager( statusManager status.Manager, livenessManager results.Manager, + startupManager results.Manager, runner kubecontainer.ContainerCommandRunner, refManager *kubecontainer.RefManager, recorder record.EventRecorder) Manager { prober := newProber(runner, refManager, recorder) readinessManager := results.NewManager() - startupManager := results.NewManager() return &manager{ statusManager: statusManager, prober: prober, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go index 58d02c588c3..bb9f494bf20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go @@ -40,14 +40,17 @@ type Manager interface { } // Result is the type for probe results. -type Result bool +type Result int const ( - // Success is encoded as "true" (type Result) - Success Result = true + // Unknown is encoded as -1 (type Result) + Unknown Result = iota - 1 - // Failure is encoded as "false" (type Result) - Failure Result = false + // Success is encoded as 0 (type Result) + Success + + // Failure is encoded as 1 (type Result) + Failure ) func (r Result) String() string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go index e43041855e3..a7882d72c59 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go @@ -20,9 +20,9 @@ import ( "math/rand" "time" - "github.com/prometheus/client_golang/prometheus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/component-base/metrics" "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -69,9 +69,9 @@ type worker struct { // proberResultsMetricLabels holds the labels attached to this worker // for the ProberResults metric by result. - proberResultsSuccessfulMetricLabels prometheus.Labels - proberResultsFailedMetricLabels prometheus.Labels - proberResultsUnknownMetricLabels prometheus.Labels + proberResultsSuccessfulMetricLabels metrics.Labels + proberResultsFailedMetricLabels metrics.Labels + proberResultsUnknownMetricLabels metrics.Labels } // Creates and starts a new probe worker. @@ -101,10 +101,10 @@ func newWorker( case startup: w.spec = container.StartupProbe w.resultsManager = m.startupManager - w.initialValue = results.Failure + w.initialValue = results.Unknown } - basicMetricLabels := prometheus.Labels{ + basicMetricLabels := metrics.Labels{ "probe_type": w.probeType.String(), "container": w.container.Name, "pod": w.pod.Name, @@ -284,8 +284,8 @@ func (w *worker) doProbe() (keepGoing bool) { return true } -func deepCopyPrometheusLabels(m prometheus.Labels) prometheus.Labels { - ret := make(prometheus.Labels, len(m)) +func deepCopyPrometheusLabels(m metrics.Labels) metrics.Labels { + ret := make(metrics.Labels, len(m)) for k, v := range m { ret[k] = v } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go index 46c271afc80..6210ed45e48 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package qos contains helper functions for quality of service. +// Package qos contains helper functions for quality of service. // For each resource (memory, CPU) Kubelet supports three classes of containers. // Memory guaranteed containers will receive the highest priority and will get all the resources // they need. 
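The results_manager.go hunk above turns Result from a bool into an int so that a third state, Unknown, can be represented, and the worker.go hunk seeds startup probes with results.Unknown instead of results.Failure. The standalone sketch below re-declares that constant block and prints the values the iota expression yields; only the type and constants are taken from the patch.

package main

import "fmt"

// Result is copied from the patched prober results package.
type Result int

const (
	// Unknown is encoded as -1 (type Result)
	Unknown Result = iota - 1

	// Success is encoded as 0 (type Result)
	Success

	// Failure is encoded as 1 (type Result)
	Failure
)

func main() {
	// iota counts 0, 1, 2 through the block and each constant repeats the
	// "iota - 1" expression, so the values come out as -1, 0 and 1.
	fmt.Println(Unknown, Success, Failure) // prints: -1 0 1
}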
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go index 344b0d99ce9..b1e8ed998bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -27,15 +27,18 @@ const ( // sense to set sandbox level oom score, e.g. a sandbox could only be a namespace // without a process. // TODO: Handle infra container oom score adj in a runtime agnostic way. - PodInfraOOMAdj int = -998 - KubeletOOMScoreAdj int = -999 - DockerOOMScoreAdj int = -999 + PodInfraOOMAdj int = -998 + // KubeletOOMScoreAdj is the OOM score adjustment for Kubelet + KubeletOOMScoreAdj int = -999 + // DockerOOMScoreAdj is the OOM score adjustment for Docker + DockerOOMScoreAdj int = -999 + // KubeProxyOOMScoreAdj is the OOM score adjustment for kube-proxy KubeProxyOOMScoreAdj int = -999 guaranteedOOMScoreAdj int = -998 besteffortOOMScoreAdj int = 1000 ) -// GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the +// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the // container should be adjusted. // The OOM score of a process is the percentage of memory it consumes // multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/reason_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/reason_cache.go index 4c42ab8a7ce..60327baf954 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/reason_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/reason_cache.go @@ -40,8 +40,8 @@ type ReasonCache struct { cache *lru.Cache } -// Reason is the cached item in ReasonCache -type reasonItem struct { +// ReasonItem is the cached item in ReasonCache +type ReasonItem struct { Err error Message string } @@ -64,7 +64,7 @@ func (c *ReasonCache) composeKey(uid types.UID, name string) string { func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) { c.lock.Lock() defer c.lock.Unlock() - c.cache.Add(c.composeKey(uid, name), reasonItem{reason, message}) + c.cache.Add(c.composeKey(uid, name), ReasonItem{reason, message}) } // Update updates the reason cache with the SyncPodResult. Only SyncResult with @@ -93,13 +93,13 @@ func (c *ReasonCache) Remove(uid types.UID, name string) { // Get gets error reason from the cache. The return values are error reason, error message and // whether an error reason is found in the cache. If no error reason is found, empty string will // be returned for error reason and error message. 
-func (c *ReasonCache) Get(uid types.UID, name string) (*reasonItem, bool) { +func (c *ReasonCache) Get(uid types.UID, name string) (*ReasonItem, bool) { c.lock.Lock() defer c.lock.Unlock() value, ok := c.cache.Get(c.composeKey(uid, name)) if !ok { return nil, false } - info := value.(reasonItem) + info := value.(ReasonItem) return &info, true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD index 87ea8f01d82..d83619fcbe5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD @@ -52,8 +52,6 @@ go_library( "//vendor/github.com/google/cadvisor/container:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -93,6 +91,7 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/BUILD index f8fdebf05e4..5afe16064aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/BUILD @@ -12,7 +12,6 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/metrics.go index f5c7ad5f633..d287de225db 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/metrics/metrics.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -51,7 +49,7 @@ var ( Name: "http_requests_duration_seconds", Help: "Duration in seconds to serve http requests", // Use DefBuckets for now, will customize the buckets if necessary. 
- Buckets: prometheus.DefBuckets, + Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, }, []string{"method", "path", "server_type", "long_running"}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 324e8ab8576..71e17a4578a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -36,12 +36,10 @@ import ( cadvisormetrics "github.com/google/cadvisor/container" cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/metrics" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" "google.golang.org/grpc" "k8s.io/klog" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -147,6 +145,8 @@ func ListenAndServeKubeletServer( s := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), Handler: &handler, + ReadTimeout: 4 * 60 * time.Minute, + WriteTimeout: 4 * 60 * time.Minute, MaxHeaderBytes: 1 << 20, } if tlsOptions != nil { @@ -260,7 +260,7 @@ func (s *Server) InstallAuthFilter() { attrs := s.auth.GetRequestAttributes(info.User, req.Request) // Authorize - decision, _, err := s.auth.Authorize(attrs) + decision, _, err := s.auth.Authorize(req.Request.Context(), attrs) if err != nil { msg := fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", attrs.GetUser().GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) klog.Errorf(msg, err) @@ -301,7 +301,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { s.restfulCont.Handle(metricsPath, legacyregistry.Handler()) // cAdvisor metrics are exposed under the secured handler as well - r := prometheus.NewRegistry() + r := compbasemetrics.NewKubeRegistry() includedMetrics := cadvisormetrics.MetricSet{ cadvisormetrics.CpuUsageMetrics: struct{}{}, @@ -314,15 +314,15 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { cadvisormetrics.AppMetrics: struct{}{}, cadvisormetrics.ProcessMetrics: struct{}{}, } - r.MustRegister(metrics.NewPrometheusCollector(prometheusHostAdapter{s.host}, containerPrometheusLabelsFunc(s.host), includedMetrics)) + r.RawMustRegister(metrics.NewPrometheusCollector(prometheusHostAdapter{s.host}, containerPrometheusLabelsFunc(s.host), includedMetrics)) s.restfulCont.Handle(cadvisorMetricsPath, - promhttp.HandlerFor(r, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}), + compbasemetrics.HandlerFor(r, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}), ) - v1alpha1ResourceRegistry := prometheus.NewRegistry() - v1alpha1ResourceRegistry.MustRegister(stats.NewPrometheusResourceMetricCollector(s.resourceAnalyzer, v1alpha1.Config())) + v1alpha1ResourceRegistry := compbasemetrics.NewKubeRegistry() + v1alpha1ResourceRegistry.CustomMustRegister(stats.NewPrometheusResourceMetricCollector(s.resourceAnalyzer, v1alpha1.Config())) s.restfulCont.Handle(path.Join(resourceMetricsPathPrefix, v1alpha1.Version), - promhttp.HandlerFor(v1alpha1ResourceRegistry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}), + compbasemetrics.HandlerFor(v1alpha1ResourceRegistry, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}), ) // 
prober metrics are exposed under a different endpoint @@ -331,7 +331,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { compbasemetrics.RegisterProcessStartTime(p.RawRegister) p.MustRegister(prober.ProberResults) s.restfulCont.Handle(proberMetricsPath, - promhttp.HandlerFor(p, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}), + compbasemetrics.HandlerFor(p, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}), ) if enableCAdvisorJSONEndpoints { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD index 954fec082d1..878382309f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD @@ -26,9 +26,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -49,8 +49,8 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", + "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/mock:go_default_library", ] + select({ @@ -70,6 +70,10 @@ go_test( "//pkg/kubelet/cm:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/kubelet/cm:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/prometheus_resource_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/prometheus_resource_metrics.go index 050bc20a73c..701c897126a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/prometheus_resource_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/prometheus_resource_metrics.go @@ -19,32 +19,29 @@ package stats import ( "time" + "k8s.io/component-base/metrics" "k8s.io/klog" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - - "github.com/prometheus/client_golang/prometheus" ) // NodeResourceMetric describes a metric for the node type NodeResourceMetric struct { - Name string - Description string - ValueFn func(stats.NodeStats) (*float64, time.Time) + Desc *metrics.Desc + ValueFn func(stats.NodeStats) (*float64, time.Time) } -func (n *NodeResourceMetric) desc() *prometheus.Desc { - return prometheus.NewDesc(n.Name, 
n.Description, []string{}, nil) +func (n *NodeResourceMetric) desc() *metrics.Desc { + return n.Desc } // ContainerResourceMetric describes a metric for containers type ContainerResourceMetric struct { - Name string - Description string - ValueFn func(stats.ContainerStats) (*float64, time.Time) + Desc *metrics.Desc + ValueFn func(stats.ContainerStats) (*float64, time.Time) } -func (n *ContainerResourceMetric) desc() *prometheus.Desc { - return prometheus.NewDesc(n.Name, n.Description, []string{"container", "pod", "namespace"}, nil) +func (n *ContainerResourceMetric) desc() *metrics.Desc { + return n.Desc } // ResourceMetricsConfig specifies which metrics to collect and export @@ -53,29 +50,34 @@ type ResourceMetricsConfig struct { ContainerMetrics []ContainerResourceMetric } -// NewPrometheusResourceMetricCollector returns a prometheus.Collector which exports resource metrics -func NewPrometheusResourceMetricCollector(provider SummaryProvider, config ResourceMetricsConfig) prometheus.Collector { +// NewPrometheusResourceMetricCollector returns a metrics.StableCollector which exports resource metrics +func NewPrometheusResourceMetricCollector(provider SummaryProvider, config ResourceMetricsConfig) metrics.StableCollector { return &resourceMetricCollector{ provider: provider, config: config, - errors: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "scrape_error", - Help: "1 if there was an error while getting container metrics, 0 otherwise", - }), + errors: metrics.NewDesc("scrape_error", + "1 if there was an error while getting container metrics, 0 otherwise", + nil, + nil, + metrics.ALPHA, + ""), } } type resourceMetricCollector struct { + metrics.BaseStableCollector + provider SummaryProvider config ResourceMetricsConfig - errors prometheus.Gauge + errors *metrics.Desc } -var _ prometheus.Collector = &resourceMetricCollector{} +var _ metrics.StableCollector = &resourceMetricCollector{} + +// DescribeWithStability implements metrics.StableCollector +func (rc *resourceMetricCollector) DescribeWithStability(ch chan<- *metrics.Desc) { + ch <- rc.errors -// Describe implements prometheus.Collector -func (rc *resourceMetricCollector) Describe(ch chan<- *prometheus.Desc) { - rc.errors.Describe(ch) for _, metric := range rc.config.NodeMetrics { ch <- metric.desc() } @@ -84,24 +86,26 @@ func (rc *resourceMetricCollector) Describe(ch chan<- *prometheus.Desc) { } } -// Collect implements prometheus.Collector -// Since new containers are frequently created and removed, using the prometheus.Gauge Collector would +// CollectWithStability implements metrics.StableCollector +// Since new containers are frequently created and removed, using the Gauge would // leak metric collectors for containers or pods that no longer exist. Instead, implement -// prometheus.Collector in a way that only collects metrics for active containers. -func (rc *resourceMetricCollector) Collect(ch chan<- prometheus.Metric) { - rc.errors.Set(0) - defer rc.errors.Collect(ch) +// custom collector in a way that only collects metrics for active containers. 
+func (rc *resourceMetricCollector) CollectWithStability(ch chan<- metrics.Metric) { + var errorCount float64 + defer func() { + ch <- metrics.NewLazyConstMetric(rc.errors, metrics.GaugeValue, errorCount) + }() summary, err := rc.provider.GetCPUAndMemoryStats() if err != nil { - rc.errors.Set(1) + errorCount = 1 klog.Warningf("Error getting summary for resourceMetric prometheus endpoint: %v", err) return } for _, metric := range rc.config.NodeMetrics { if value, timestamp := metric.ValueFn(summary.Node); value != nil { - ch <- prometheus.NewMetricWithTimestamp(timestamp, - prometheus.MustNewConstMetric(metric.desc(), prometheus.GaugeValue, *value)) + ch <- metrics.NewLazyMetricWithTimestamp(timestamp, + metrics.NewLazyConstMetric(metric.desc(), metrics.GaugeValue, *value)) } } @@ -109,8 +113,8 @@ func (rc *resourceMetricCollector) Collect(ch chan<- prometheus.Metric) { for _, container := range pod.Containers { for _, metric := range rc.config.ContainerMetrics { if value, timestamp := metric.ValueFn(container); value != nil { - ch <- prometheus.NewMetricWithTimestamp(timestamp, - prometheus.MustNewConstMetric(metric.desc(), prometheus.GaugeValue, *value, container.Name, pod.PodRef.Name, pod.PodRef.Namespace)) + ch <- metrics.NewLazyMetricWithTimestamp(timestamp, + metrics.NewLazyConstMetric(metric.desc(), metrics.GaugeValue, *value, container.Name, pod.PodRef.Name, pod.PodRef.Namespace)) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go index 492c16bbd77..bdce37a4cc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go @@ -661,7 +661,8 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi if nanoSeconds <= 0 { return nil, fmt.Errorf("zero or negative interval (%v - %v)", newStats.Timestamp, cachedStats.Timestamp) } - usageNanoCores := (newStats.UsageCoreNanoSeconds.Value - cachedStats.UsageCoreNanoSeconds.Value) * uint64(time.Second/time.Nanosecond) / uint64(nanoSeconds) + usageNanoCores := uint64(float64(newStats.UsageCoreNanoSeconds.Value-cachedStats.UsageCoreNanoSeconds.Value) / + float64(nanoSeconds) * float64(time.Second/time.Nanosecond)) // Update cache with new value. usageToUpdate := usageNanoCores @@ -671,7 +672,7 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi }() if err != nil { - // This should not happen. Log now to raise visiblity + // This should not happen. 
Log now to raise visibility klog.Errorf("failed updating cpu usage nano core: %v", err) } return usage diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/BUILD index d88396b7d6a..ce128b9e8e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/BUILD @@ -14,6 +14,7 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", @@ -24,6 +25,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go index 8d9415d2247..ef87faa993c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go @@ -25,11 +25,16 @@ import ( ) const ( + // UnknownContainerStatuses says that all container statuses are unknown. UnknownContainerStatuses = "UnknownContainerStatuses" - PodCompleted = "PodCompleted" - ContainersNotReady = "ContainersNotReady" + // PodCompleted says that all related containers have succeeded. + PodCompleted = "PodCompleted" + // ContainersNotReady says that one or more containers are not ready. + ContainersNotReady = "ContainersNotReady" + // ContainersNotInitialized says that one or more init containers have not succeeded. ContainersNotInitialized = "ContainersNotInitialized" - ReadinessGatesNotReady = "ReadinessGatesNotReady" + // ReadinessGatesNotReady says that one or more pod readiness gates are not ready. + ReadinessGatesNotReady = "ReadinessGatesNotReady" ) // GenerateContainersReadyCondition returns the status of "ContainersReady" condition. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 5e668a57eb5..3029d926c1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -79,7 +79,7 @@ type PodStatusProvider interface { GetPodStatus(uid types.UID) (v1.PodStatus, bool) } -// An object which provides guarantees that a pod can be safely deleted. +// PodDeletionSafetyProvider provides guarantees that a pod can be safely deleted. type PodDeletionSafetyProvider interface { // A function which returns true if the pod can safely be deleted PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool @@ -115,6 +115,7 @@ type Manager interface { const syncPeriod = 10 * time.Second +// NewManager returns a functional Manager. 
func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager { return &manager{ kubeClient: kubeClient, @@ -520,7 +521,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { // TODO: make me easier to express from client code pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { - klog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) + klog.V(3).Infof("Pod %q does not exist on the server", format.PodDesc(status.podName, status.podNamespace, uid)) // If the Pod is deleted the status will be cleared in // RemoveOrphanedStatuses, so we just ignore the update here. return @@ -580,7 +581,7 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool { } func (m *manager) canBeDeleted(pod *v1.Pod, status v1.PodStatus) bool { - if pod.DeletionTimestamp == nil || kubepod.IsMirrorPod(pod) { + if pod.DeletionTimestamp == nil || kubetypes.IsMirrorPod(pod) { return false } return m.podDeletionSafety.PodResourcesAreReclaimed(pod, status) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD index 9f7d4db7caa..3980947498d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD @@ -21,6 +21,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go index 76819ac72df..6c2a98713c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go @@ -25,6 +25,7 @@ import ( "time" authenticationv1 "k8s.io/api/authentication/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" @@ -39,12 +40,35 @@ const ( // NewManager returns a new token manager. 
func NewManager(c clientset.Interface) *Manager { + // check whether the server supports token requests so we can give a more helpful error message + supported := false + once := &sync.Once{} + tokenRequestsSupported := func() bool { + once.Do(func() { + resources, err := c.Discovery().ServerResourcesForGroupVersion("v1") + if err != nil { + return + } + for _, resource := range resources.APIResources { + if resource.Name == "serviceaccounts/token" { + supported = true + return + } + } + }) + return supported + } + m := &Manager{ getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { if c == nil { return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode") } - return c.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr) + tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr) + if apierrors.IsNotFound(err) && !tokenRequestsSupported() { + return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled") + } + return tokenRequest, err }, cache: make(map[string]*authenticationv1.TokenRequest), clock: clock.RealClock{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 9c3d676cd09..d3c06c8e755 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -137,11 +137,26 @@ func (sp SyncPodType) String() string { } } +// IsMirrorPod returns true if the passed Pod is a Mirror Pod. +func IsMirrorPod(pod *v1.Pod) bool { + _, ok := pod.Annotations[ConfigMirrorAnnotationKey] + return ok +} + +// IsStaticPod returns true if the pod is a static pod. +func IsStaticPod(pod *v1.Pod) bool { + source, err := GetPodSource(pod) + return err == nil && source != ApiserverSource +} + // IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority. func IsCriticalPod(pod *v1.Pod) bool { if IsStaticPod(pod) { return true } + if IsMirrorPod(pod) { + return true + } if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) { return true } @@ -166,9 +181,3 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool { func IsCriticalPodBasedOnPriority(priority int32) bool { return priority >= scheduling.SystemCriticalPriority } - -// IsStaticPod returns true if the pod is a static pod. 
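The token manager change above wraps CreateToken errors: on a NotFound it consults discovery (at most once, guarded by sync.Once) to see whether the API server serves the serviceaccounts/token subresource, so clusters without TokenRequest enabled get a clearer error. A self-contained sketch of that memoized capability check; the discovery interface below is a pared-down stand-in for the client-go discovery client used in the patch:

package main

import (
	"fmt"
	"sync"
)

// apiResource mirrors just the field the check inspects.
type apiResource struct{ Name string }

// discovery is a stand-in for the client-go discovery interface; only the one
// call used by the check is modeled here.
type discovery interface {
	ServerResourcesForGroupVersion(gv string) ([]apiResource, error)
}

// newTokenRequestSupportCheck returns a func reporting whether the
// "serviceaccounts/token" subresource is served. The lookup runs at most once;
// a discovery failure leaves the answer at false, matching the patch's
// conservative default.
func newTokenRequestSupportCheck(d discovery) func() bool {
	var (
		once      sync.Once
		supported bool
	)
	return func() bool {
		once.Do(func() {
			resources, err := d.ServerResourcesForGroupVersion("v1")
			if err != nil {
				return
			}
			for _, r := range resources {
				if r.Name == "serviceaccounts/token" {
					supported = true
					return
				}
			}
		})
		return supported
	}
}

type fakeDiscovery struct{ resources []apiResource }

func (f fakeDiscovery) ServerResourcesForGroupVersion(string) ([]apiResource, error) {
	return f.resources, nil
}

func main() {
	check := newTokenRequestSupportCheck(fakeDiscovery{resources: []apiResource{{Name: "serviceaccounts/token"}}})
	fmt.Println(check()) // true
}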
-func IsStaticPod(pod *v1.Pod) bool { - source, err := GetPodSource(pod) - return err == nil && source != ApiserverSource -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD index f663c28d5ce..b07f4504775 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD @@ -14,6 +14,10 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", @@ -22,6 +26,10 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", @@ -50,6 +58,10 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -58,6 +70,10 @@ go_library( "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD index e84deec931e..43bd2626084 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD @@ -1,9 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -23,3 +20,10 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["ioutils_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go index 42f1998c794..1b2b5a6d5dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go @@ -35,3 +35,36 @@ func (w 
*writeCloserWrapper) Close() error { func WriteCloserWrapper(w io.Writer) io.WriteCloser { return &writeCloserWrapper{w} } + +// LimitWriter is a copy of the standard library ioutils.LimitReader, +// applied to the writer interface. +// LimitWriter returns a Writer that writes to w +// but stops with EOF after n bytes. +// The underlying implementation is a *LimitedWriter. +func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} } + +// A LimitedWriter writes to W but limits the amount of +// data returned to just N bytes. Each call to Write +// updates N to reflect the new amount remaining. +// Write returns EOF when N <= 0 or when the underlying W returns EOF. +type LimitedWriter struct { + W io.Writer // underlying writer + N int64 // max bytes remaining +} + +func (l *LimitedWriter) Write(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, io.ErrShortWrite + } + truncated := false + if int64(len(p)) > l.N { + p = p[0:l.N] + truncated = true + } + n, err = l.W.Write(p) + l.N -= int64(n) + if err == nil && truncated { + err = io.ErrShortWrite + } + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go index 7135a1300c0..5556d0cc714 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go @@ -22,6 +22,8 @@ import ( "runtime" "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" @@ -36,14 +38,10 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/configmap" - "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/mountpod" "k8s.io/kubernetes/pkg/kubelet/secret" "k8s.io/kubernetes/pkg/kubelet/token" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - execmnt "k8s.io/kubernetes/pkg/volume/util/exec" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/subpath" ) @@ -80,20 +78,16 @@ func NewInitializedVolumePluginMgr( } } - mountPodManager, err := mountpod.NewManager(kubelet.getRootDir(), kubelet.podManager) - if err != nil { - return nil, err - } kvh := &kubeletVolumeHost{ kubelet: kubelet, volumePluginMgr: volume.VolumePluginMgr{}, secretManager: secretManager, configMapManager: configMapManager, tokenManager: tokenManager, - mountPodManager: mountPodManager, informerFactory: informerFactory, csiDriverLister: csiDriverLister, csiDriversSynced: csiDriversSynced, + exec: utilexec.New(), } if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil { @@ -119,10 +113,10 @@ type kubeletVolumeHost struct { secretManager secret.Manager tokenManager *token.Manager configMapManager configmap.Manager - mountPodManager mountpod.Manager informerFactory informers.SharedInformerFactory csiDriverLister storagelisters.CSIDriverLister csiDriversSynced cache.InformerSynced + exec utilexec.Interface } func (kvh *kubeletVolumeHost) SetKubeletError(err error) { @@ -227,16 +221,7 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface { } func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface { - exec, err := kvh.getMountExec(pluginName) - if err != nil { - klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) - // Use the default mounter 
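The ioutils hunk above adds LimitWriter/LimitedWriter, a write-side counterpart of io.LimitReader: once N bytes have been forwarded, further output is truncated and reported as io.ErrShortWrite. A short usage sketch against a bytes.Buffer; the type is reproduced (lower-cased) so the example compiles on its own:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// limitedWriter mirrors the LimitedWriter added in pkg/kubelet/util/ioutils
// above: it forwards at most N bytes to W and reports io.ErrShortWrite once
// the output has been truncated.
type limitedWriter struct {
	W io.Writer
	N int64
}

func (l *limitedWriter) Write(p []byte) (n int, err error) {
	if l.N <= 0 {
		return 0, io.ErrShortWrite
	}
	truncated := false
	if int64(len(p)) > l.N {
		p = p[0:l.N]
		truncated = true
	}
	n, err = l.W.Write(p)
	l.N -= int64(n)
	if err == nil && truncated {
		err = io.ErrShortWrite
	}
	return
}

func main() {
	var buf bytes.Buffer
	w := &limitedWriter{W: &buf, N: 5}

	n, err := w.Write([]byte("hello world"))
	fmt.Println(n, err)       // 5 short write
	fmt.Println(buf.String()) // "hello" -- output is capped, not dropped
}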
- exec = nil - } - if exec == nil { - return kvh.kubelet.mounter - } - return execmnt.NewExecMounter(exec, kvh.kubelet.mounter) + return kvh.kubelet.mounter } func (kvh *kubeletVolumeHost) GetHostName() string { @@ -287,57 +272,6 @@ func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder { return kvh.kubelet.recorder } -func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { - exec, err := kvh.getMountExec(pluginName) - if err != nil { - klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) - // Use the default exec - exec = nil - } - if exec == nil { - return mount.NewOSExec() - } - return exec -} - -// getMountExec returns mount.Exec implementation that leads to pod with mount -// utilities. It returns nil,nil when there is no such pod and default mounter / -// os.Exec should be used. -func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) { - if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) { - klog.V(5).Infof("using default mounter/exec for %s", pluginName) - return nil, nil - } - - pod, container, err := kvh.mountPodManager.GetMountPod(pluginName) - if err != nil { - return nil, err - } - if pod == nil { - // Use default mounter/exec for this plugin - klog.V(5).Infof("using default mounter/exec for %s", pluginName) - return nil, nil - } - klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName) - return &containerExec{ - pod: pod, - containerName: container, - kl: kvh.kubelet, - }, nil -} - -// containerExec is implementation of mount.Exec that executes commands in given -// container in given pod. -type containerExec struct { - pod *v1.Pod - containerName string - kl *Kubelet -} - -var _ mount.Exec = &containerExec{} - -func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) { - cmdline := append([]string{cmd}, args...) 
- klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) - return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline) +func (kvh *kubeletVolumeHost) GetExec(pluginName string) utilexec.Interface { + return kvh.exec } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD index 14a84296c55..df2965a5321 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD @@ -23,8 +23,8 @@ go_library( "//pkg/kubelet/volumemanager/metrics:go_default_library", "//pkg/kubelet/volumemanager/populator:go_default_library", "//pkg/kubelet/volumemanager/reconciler:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/csimigration:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/hostutil:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", @@ -37,7 +37,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/csi-translation-lib:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -55,7 +57,6 @@ go_test( "//pkg/kubelet/secret:go_default_library", "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/status/testing:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -71,6 +72,7 @@ go_test( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 0cc57aed0c4..08fe393e5a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -492,11 +492,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( asw.Lock() defer asw.Unlock() for volumeName, volumeObj := range asw.attachedVolumes { - for mountedPodName, podObj := range volumeObj.mountedPods { - if mountedPodName != podName { - continue - } - + if podObj, podExists := volumeObj.mountedPods[podName]; podExists { volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { @@ -523,14 +519,14 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( podName volumetypes.UniquePodName) { asw.Lock() defer asw.Unlock() - volumeObj, exist := asw.attachedVolumes[volumeName] - if !exist { + volumeObj, volumeExists := asw.attachedVolumes[volumeName] + if !volumeExists { klog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) return } - podObj, exist := volumeObj.mountedPods[podName] - if 
!exist { + podObj, podExists := volumeObj.mountedPods[podName] + if !podExists { klog.Warningf("MarkFSResizeRequired for volume %s failed "+ "as pod(%s) not exist", volumeName, podName) return @@ -648,8 +644,8 @@ func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.Uniq asw.RLock() defer asw.RUnlock() for _, volumeObj := range asw.attachedVolumes { - for name, podObj := range volumeObj.mountedPods { - if podName == name && podObj.volumeSpec.Name() == volumeSpecName { + if podObj, podExists := volumeObj.mountedPods[podName]; podExists { + if podObj.volumeSpec.Name() == volumeSpecName { return true } } @@ -687,12 +683,10 @@ func (asw *actualStateOfWorld) GetMountedVolumesForPod( defer asw.RUnlock() mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */) for _, volumeObj := range asw.attachedVolumes { - for mountedPodName, podObj := range volumeObj.mountedPods { - if mountedPodName == podName { - mountedVolume = append( - mountedVolume, - getMountedVolume(&podObj, &volumeObj)) - } + if podObj, podExists := volumeObj.mountedPods[podName]; podExists { + mountedVolume = append( + mountedVolume, + getMountedVolume(&podObj, &volumeObj)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 3387a2a93b4..7d4ee41be37 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -362,8 +362,8 @@ func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePod dsw.RLock() defer dsw.RUnlock() for _, volumeObj := range dsw.volumesToMount { - for name, podObj := range volumeObj.podsToMount { - if podName == name && podObj.volumeSpec.Name() == volumeSpecName { + if podObj, podExists := volumeObj.podsToMount[podName]; podExists { + if podObj.volumeSpec.Name() == volumeSpecName { return true } } @@ -378,9 +378,7 @@ func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool { podList := make(map[types.UniquePodName]bool) for _, volumeObj := range dsw.volumesToMount { for podName := range volumeObj.podsToMount { - if !podList[podName] { - podList[podName] = true - } + podList[podName] = true } } return podList diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD index 0ac9ca9fb1b..385d70102e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD @@ -9,9 +9,8 @@ go_library( "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go index 86f800b0bc4..e9e4bd04fb6 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go @@ -17,11 +17,10 @@ limitations under the License. package metrics import ( - "k8s.io/component-base/metrics/legacyregistry" "sync" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -37,11 +36,12 @@ const ( var ( registerMetrics sync.Once - totalVolumesDesc = prometheus.NewDesc( + totalVolumesDesc = metrics.NewDesc( volumeManagerTotalVolumes, "Number of volumes in Volume Manager", []string{"plugin_name", "state"}, nil, + metrics.ALPHA, "", ) ) @@ -60,36 +60,34 @@ func (v volumeCount) add(state, plugin string) { // Register registers Volume Manager metrics. func Register(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, pluginMgr *volume.VolumePluginMgr) { registerMetrics.Do(func() { - legacyregistry.RawMustRegister(&totalVolumesCollector{asw, dsw, pluginMgr}) + legacyregistry.CustomMustRegister(&totalVolumesCollector{asw: asw, dsw: dsw, pluginMgr: pluginMgr}) }) } type totalVolumesCollector struct { + metrics.BaseStableCollector + asw cache.ActualStateOfWorld dsw cache.DesiredStateOfWorld pluginMgr *volume.VolumePluginMgr } -var _ prometheus.Collector = &totalVolumesCollector{} +var _ metrics.StableCollector = &totalVolumesCollector{} -// Describe implements the prometheus.Collector interface. -func (c *totalVolumesCollector) Describe(ch chan<- *prometheus.Desc) { +// DescribeWithStability implements the metrics.StableCollector interface. +func (c *totalVolumesCollector) DescribeWithStability(ch chan<- *metrics.Desc) { ch <- totalVolumesDesc } -// Collect implements the prometheus.Collector interface. -func (c *totalVolumesCollector) Collect(ch chan<- prometheus.Metric) { +// CollectWithStability implements the metrics.StableCollector interface. 
+func (c *totalVolumesCollector) CollectWithStability(ch chan<- metrics.Metric) { for stateName, pluginCount := range c.getVolumeCount() { for pluginName, count := range pluginCount { - metric, err := prometheus.NewConstMetric(totalVolumesDesc, - prometheus.GaugeValue, + ch <- metrics.NewLazyConstMetric(totalVolumesDesc, + metrics.GaugeValue, float64(count), pluginName, stateName) - if err != nil { - klog.Warningf("Failed to create metric : %v", err) - } - ch <- metric } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD index 68a003ac6fe..09896dcadd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD @@ -19,6 +19,7 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/csimigration:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -59,6 +60,7 @@ go_test( "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/status/testing:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", + "//pkg/volume/csimigration:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", @@ -70,5 +72,6 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//staging/src/k8s.io/csi-translation-lib:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 6b13e303423..fce0d839911 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -43,6 +43,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" ) @@ -87,7 +88,9 @@ func NewDesiredStateOfWorldPopulator( desiredStateOfWorld cache.DesiredStateOfWorld, actualStateOfWorld cache.ActualStateOfWorld, kubeContainerRuntime kubecontainer.Runtime, - keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator { + keepTerminatedPodVolumes bool, + csiMigratedPluginManager csimigration.PluginManager, + intreeToCSITranslator csimigration.InTreeToCSITranslator) DesiredStateOfWorldPopulator { return &desiredStateOfWorldPopulator{ kubeClient: kubeClient, loopSleepDuration: loopSleepDuration, @@ -102,6 +105,8 @@ func NewDesiredStateOfWorldPopulator( keepTerminatedPodVolumes: keepTerminatedPodVolumes, hasAddedPods: false, hasAddedPodsLock: sync.RWMutex{}, + csiMigratedPluginManager: csiMigratedPluginManager, + intreeToCSITranslator: intreeToCSITranslator, } } @@ -119,6 +124,8 
@@ type desiredStateOfWorldPopulator struct { keepTerminatedPodVolumes bool hasAddedPods bool hasAddedPodsLock sync.RWMutex + csiMigratedPluginManager csimigration.PluginManager + intreeToCSITranslator csimigration.InTreeToCSITranslator } type processedPods struct { @@ -298,6 +305,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( allVolumesAdded := true mounts, devices := util.GetPodVolumeNames(pod) + expandInUsePV := utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) // Process volume spec for each volume defined in pod for _, podVolume := range pod.Spec.Volumes { if !mounts.Has(podVolume.Name) && !devices.Has(podVolume.Name) { @@ -331,15 +339,15 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( err) dswp.desiredStateOfWorld.AddErrorToPod(uniquePodName, err.Error()) allVolumesAdded = false + } else { + klog.V(4).Infof( + "Added volume %q (volSpec=%q) for pod %q to desired state.", + podVolume.Name, + volumeSpec.Name(), + uniquePodName) } - klog.V(4).Infof( - "Added volume %q (volSpec=%q) for pod %q to desired state.", - podVolume.Name, - volumeSpec.Name(), - uniquePodName) - - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { + if expandInUsePV { dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec, uniquePodName, mountedVolumesForPod, processedVolumesForFSResize) } @@ -504,19 +512,35 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( pvcSource.ClaimName, pvcUID) - // TODO: remove feature gate check after no longer needed - if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := util.GetVolumeMode(volumeSpec) + migratable, err := dswp.csiMigratedPluginManager.IsMigratable(volumeSpec) + if err != nil { + return nil, nil, "", err + } + if migratable { + volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, dswp.intreeToCSITranslator) if err != nil { return nil, nil, "", err } - // Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem - if mounts.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeFilesystem { - return nil, nil, "", fmt.Errorf( - "volume %s has volumeMode %s, but is specified in volumeMounts", - podVolume.Name, - volumeMode) - } + } + + // TODO: replace this with util.GetVolumeMode() when features.BlockVolume is removed. + // The function will return the right value then. + volumeMode := v1.PersistentVolumeFilesystem + if volumeSpec.PersistentVolume != nil && volumeSpec.PersistentVolume.Spec.VolumeMode != nil { + volumeMode = *volumeSpec.PersistentVolume.Spec.VolumeMode + } + + // TODO: remove features.BlockVolume checks / comments after no longer needed + // Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem. + // Do not check feature gate here to make sure even when the feature is disabled in kubelet, + // because controller-manager / API server can already contain block PVs / PVCs. 
+ if mounts.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeFilesystem { + return nil, nil, "", fmt.Errorf( + "volume %s has volumeMode %s, but is specified in volumeMounts", + podVolume.Name, + volumeMode) + } + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { // Error if a container has volumeDevices but the volumeMode of PVC isn't Block if devices.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeBlock { return nil, nil, "", fmt.Errorf( @@ -531,7 +555,18 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( // Do not return the original volume object, since the source could mutate it clonedPodVolume := podVolume.DeepCopy() - return nil, volume.NewSpecFromVolume(clonedPodVolume), "", nil + spec := volume.NewSpecFromVolume(clonedPodVolume) + migratable, err := dswp.csiMigratedPluginManager.IsMigratable(spec) + if err != nil { + return nil, nil, "", err + } + if migratable { + spec, err = csimigration.TranslateInTreeSpecToCSI(spec, dswp.intreeToCSITranslator) + if err != nil { + return nil, nil, "", err + } + } + return nil, spec, "", nil } // getPVCExtractPV fetches the PVC object with the given namespace and name from diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD index 523ca37a03d..f70e7a9090c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD @@ -15,7 +15,6 @@ go_library( "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -29,6 +28,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/path:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], @@ -41,7 +41,6 @@ go_test( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -60,6 +59,7 @@ go_test( "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index 4199dd6b527..3524446d5d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -24,28 +24,30 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "time" + "k8s.io/klog" + "k8s.io/utils/mount" + utilpath "k8s.io/utils/path" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" - "k8s.io/kubernetes/pkg/util/mount" volumepkg "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - utilpath "k8s.io/utils/path" - utilstrings "k8s.io/utils/strings" ) // Reconciler runs a periodic loop to reconcile the desired state of the world @@ -352,10 +354,10 @@ type reconstructedVolume struct { volumeSpec *volumepkg.Spec outerVolumeSpecName string pod *v1.Pod - attachablePlugin volumepkg.AttachableVolumePlugin volumeGidValue string devicePath string mounter volumepkg.Mounter + deviceMounter volumepkg.DeviceMounter blockVolumeMapper volumepkg.BlockVolumeMapper } @@ -499,6 +501,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, var volumeMapper volumepkg.BlockVolumeMapper var volumeMounter volumepkg.Mounter + var deviceMounter volumepkg.DeviceMounter // Path to the mount or block device to check var checkPath string @@ -518,7 +521,8 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, pod.UID, newMapperErr) } - checkPath, _ = volumeMapper.GetPodDeviceMapPath() + mapDir, linkName := volumeMapper.GetPodDeviceMapPath() + checkPath = filepath.Join(mapDir, linkName) } else { var err error volumeMounter, err = plugin.NewMounter( @@ -535,6 +539,17 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, err) } checkPath = volumeMounter.GetPath() + if deviceMountablePlugin != nil { + deviceMounter, err = deviceMountablePlugin.NewDeviceMounter() + if err != nil { + return nil, fmt.Errorf("reconstructVolume.NewDeviceMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v", + uniqueVolumeName, + volumeSpec.Name(), + volume.podName, + pod.UID, + err) + } + } } // Check existence of mount point for filesystem volume or symbolic link for block volume @@ -556,7 +571,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, // TODO: in case pod is added back before reconciler starts to unmount, we can update this field from desired state information outerVolumeSpecName: volume.volumeSpecName, pod: pod, - attachablePlugin: attachablePlugin, + deviceMounter: deviceMounter, volumeGidValue: "", // devicePath is updated during updateStates() by checking node status's VolumesAttached data. // TODO: get device path directly from the volume mount path. 
@@ -583,25 +598,19 @@ func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName } } +// getDeviceMountPath returns device mount path for block volume which +// implements BlockVolumeMapper or filesystem volume which implements +// DeviceMounter func getDeviceMountPath(volume *reconstructedVolume) (string, error) { - volumeAttacher, err := volume.attachablePlugin.NewAttacher() - if volumeAttacher == nil || err != nil { - return "", err - } - deviceMountPath, err := - volumeAttacher.GetDeviceMountPath(volume.volumeSpec) - if err != nil { - return "", err - } - if volume.blockVolumeMapper != nil { - deviceMountPath, err = - volume.blockVolumeMapper.GetGlobalMapPath(volume.volumeSpec) - if err != nil { - return "", err - } + // for block volume, we return its global map path + return volume.blockVolumeMapper.GetGlobalMapPath(volume.volumeSpec) + } else if volume.deviceMounter != nil { + // for filesystem volume, we return its device mount path if the plugin implements DeviceMounter + return volume.deviceMounter.GetDeviceMountPath(volume.volumeSpec) + } else { + return "", fmt.Errorf("blockVolumeMapper or deviceMounter required") } - return deviceMountPath, nil } func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { @@ -630,7 +639,8 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re continue } klog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) - if volume.attachablePlugin != nil { + // If the volume has device to mount, we mark its device as mounted. + if volume.deviceMounter != nil || volume.blockVolumeMapper != nil { deviceMountPath, err := getDeviceMountPath(volume) if err != nil { klog.Errorf("Could not find device mount path for volume %s", volume.volumeName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go index 0aa368761e5..3fe144a073f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go @@ -24,6 +24,9 @@ import ( "strings" "time" + "k8s.io/klog" + "k8s.io/utils/mount" + v1 "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" @@ -31,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" - "k8s.io/klog" + csitrans "k8s.io/csi-translation-lib" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" @@ -41,8 +44,8 @@ import ( "k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics" "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator" "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" @@ -174,6 +177,11 @@ func NewVolumeManager( blockVolumePathHandler)), } + intreeToCSITranslator := csitrans.New() + csiMigratedPluginManager := csimigration.NewPluginManager(intreeToCSITranslator) + + vm.intreeToCSITranslator = intreeToCSITranslator + vm.csiMigratedPluginManager = csiMigratedPluginManager 
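NewVolumeManager above now constructs a csi-translation-lib translator and a csimigration.PluginManager and threads both into the desired-state-of-world populator, which rewrites migratable in-tree volume specs to their CSI form before adding them. A condensed sketch of that per-spec decision, using the calls as they appear in this patch (it only builds inside a tree that vendors these packages, and error handling is trimmed):

package kubeletsketch

import (
	csitrans "k8s.io/csi-translation-lib"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/csimigration"
)

// translateIfMigrated mirrors the populator logic added in this patch: when
// the plugin backing spec has been migrated to CSI, the in-tree spec is
// rewritten to its CSI equivalent before it reaches the desired state of world.
func translateIfMigrated(spec *volume.Spec) (*volume.Spec, error) {
	translator := csitrans.New()
	pluginManager := csimigration.NewPluginManager(translator)

	migratable, err := pluginManager.IsMigratable(spec)
	if err != nil {
		return nil, err
	}
	if !migratable {
		return spec, nil
	}
	return csimigration.TranslateInTreeSpecToCSI(spec, translator)
}

In the actual change the translator and plugin manager are built once in NewVolumeManager and passed down, rather than being reconstructed per spec as in this sketch.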
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator( kubeClient, desiredStateOfWorldPopulatorLoopSleepPeriod, @@ -183,7 +191,9 @@ func NewVolumeManager( vm.desiredStateOfWorld, vm.actualStateOfWorld, kubeContainerRuntime, - keepTerminatedPodVolumes) + keepTerminatedPodVolumes, + csiMigratedPluginManager, + intreeToCSITranslator) vm.reconciler = reconciler.NewReconciler( kubeClient, controllerAttachDetachEnabled, @@ -238,6 +248,12 @@ type volumeManager struct { // desiredStateOfWorldPopulator runs an asynchronous periodic loop to // populate the desiredStateOfWorld using the kubelet PodManager. desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator + + // csiMigratedPluginManager keeps track of CSI migration status of plugins + csiMigratedPluginManager csimigration.PluginManager + + // intreeToCSITranslator translates in-tree volume specs to CSI + intreeToCSITranslator csimigration.InTreeToCSITranslator } func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD index b3a1150d791..53f98edbf7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD @@ -20,6 +20,7 @@ go_library( "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", + "//vendor/golang.org/x/sys/windows/registry:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go index 317c08bf028..4d8a2355197 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go @@ -78,19 +78,14 @@ func (p *perfCounterNodeStatsClient) startMonitoring() error { return err } - kernelVersion, err := getKernelVersion() - if err != nil { - return err - } - - osImageVersion, err := getOSImageVersion() + osInfo, err := GetOSInfo() if err != nil { return err } p.nodeInfo = nodeInfo{ - kernelVersion: kernelVersion, - osImageVersion: osImageVersion, + kernelVersion: osInfo.GetPatchVersion(), + osImageVersion: osInfo.ProductName, memoryPhysicalCapacityBytes: memory, startTime: time.Now(), } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go index 7db39a98082..861ab887991 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go @@ -54,11 +54,6 @@ func newPerfCounter(counter string) (*perfCounter, error) { return nil, errors.New("unable to open query through DLL call") } - ret = win_pdh.PdhValidatePath(counter) - if ret != win_pdh.ERROR_SUCCESS { - return nil, fmt.Errorf("unable to valid path to counter. Error code is %x", ret) - } - ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle) if ret != win_pdh.ERROR_SUCCESS { return nil, fmt.Errorf("unable to add process counter. 
Error code is %x", ret) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go index 8020b297368..7f38cc1cea7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go @@ -20,96 +20,63 @@ package winstats import ( "fmt" - "unsafe" - - "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) -// getCurrentVersionVal gets value of specified key from registry. -func getCurrentVersionVal(key string) (string, error) { - var h windows.Handle - if err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { - return "", err - } - defer windows.RegCloseKey(h) - - var buf [128]uint16 - var typ uint32 - n := uint32(len(buf) * int(unsafe.Sizeof(buf[0]))) // api expects array of bytes, not uint16 - if err := windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr(key), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return "", err - } - - return windows.UTF16ToString(buf[:]), nil +//OSInfo is a convenience class for retrieving Windows OS information +type OSInfo struct { + BuildNumber, ProductName string + MajorVersion, MinorVersion, UBR uint64 } -// getVersionRevision gets revision from UBR registry. -func getVersionRevision() (uint16, error) { - revisionString, err := getCurrentVersionVal("UBR") +// GetOSInfo reads Windows version information from the registry +func GetOSInfo() (*OSInfo, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) if err != nil { - return 0, err + return nil, err } + defer k.Close() - revision, err := windows.UTF16FromString(revisionString) + buildNumber, _, err := k.GetStringValue("CurrentBuildNumber") if err != nil { - return 0, err + return nil, err } - return revision[0], nil -} - -// getKernelVersion gets the version of windows kernel. -func getKernelVersion() (string, error) { - // Get CurrentBuildNumber. - buildNumber, err := getCurrentVersionVal("CurrentBuildNumber") + majorVersionNumber, _, err := k.GetIntegerValue("CurrentMajorVersionNumber") if err != nil { - return "", err + return nil, err } - // Get CurrentMajorVersionNumber. - majorVersionNumberString, err := getCurrentVersionVal("CurrentMajorVersionNumber") + minorVersionNumber, _, err := k.GetIntegerValue("CurrentMinorVersionNumber") if err != nil { - return "", err - } - majorVersionNumber, err := windows.UTF16FromString(majorVersionNumberString) - if err != nil { - return "", err + return nil, err } - // Get CurrentMinorVersionNumber. - minorVersionNumberString, err := getCurrentVersionVal("CurrentMinorVersionNumber") - if err != nil { - return "", err - } - minorVersionNumber, err := windows.UTF16FromString(minorVersionNumberString) + revision, _, err := k.GetIntegerValue("UBR") if err != nil { - return "", err + return nil, err } - // Get UBR. 
- revision, err := getVersionRevision() + productName, _, err := k.GetStringValue("ProductName") if err != nil { - return "", err + return nil, nil } - return fmt.Sprintf("%d.%d.%s.%d", majorVersionNumber[0], minorVersionNumber[0], buildNumber, revision), nil + return &OSInfo{ + BuildNumber: buildNumber, + ProductName: productName, + MajorVersion: majorVersionNumber, + MinorVersion: minorVersionNumber, + UBR: revision, + }, nil } -// getOSImageVersion gets the osImage name and version. -func getOSImageVersion() (string, error) { - productName, err := getCurrentVersionVal("ProductName") - if err != nil { - return "", nil - } +//GetPatchVersion returns full OS version with patch +func (o *OSInfo) GetPatchVersion() string { + return fmt.Sprintf("%d.%d.%s.%d", o.MajorVersion, o.MinorVersion, o.BuildNumber, o.UBR) +} - return productName, nil +//GetBuild returns OS version upto build number +func (o *OSInfo) GetBuild() string { + return fmt.Sprintf("%d.%d.%s", o.MajorVersion, o.MinorVersion, o.BuildNumber) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD index ccb01672b27..8db94cf6a0e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD @@ -28,7 +28,6 @@ go_library( "//pkg/proxy/config:go_default_library", "//pkg/proxy/iptables:go_default_library", "//pkg/util/iptables:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/sysctl:go_default_library", @@ -70,6 +69,7 @@ go_library( "//test/utils:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/OWNERS index 329e8801dc9..f9874e1757c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/OWNERS @@ -3,8 +3,10 @@ reviewers: - gmarek - shyamjvs + - sig-scalability-reviewers - wojtek-t approvers: - gmarek - shyamjvs + - sig-scalability-approvers - wojtek-t diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go index a97dd18e468..3b3706f7f32 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go @@ -20,6 +20,9 @@ import ( "fmt" "time" + "k8s.io/klog" + "k8s.io/utils/mount" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" @@ -31,7 +34,6 @@ import ( containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/dockershim" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/cephfs" @@ -57,8 +59,6 @@ import ( "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/subpath" "k8s.io/kubernetes/test/utils" - - "k8s.io/klog" ) type HollowKubelet struct { @@ -134,15 +134,19 @@ func (hk *HollowKubelet) Run() { select 
{} } +// HollowKubletOptions contains settable parameters for hollow kubelet. +type HollowKubletOptions struct { + NodeName string + KubeletPort int + KubeletReadOnlyPort int + MaxPods int + PodsPerCore int + NodeLabels map[string]string +} + // Builds a KubeletConfiguration for the HollowKubelet, ensuring that the // usual defaults are applied for fields we do not override. -func GetHollowKubeletConfig( - nodeName string, - kubeletPort int, - kubeletReadOnlyPort int, - maxPods int, - podsPerCore int) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) { - +func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) { testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "") podFilePath := utils.MakeTempDirOrDie("static-pods", testRootDir) klog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) @@ -151,13 +155,13 @@ func GetHollowKubeletConfig( f := options.NewKubeletFlags() f.EnableServer = true f.RootDirectory = testRootDir - f.HostnameOverride = nodeName + f.HostnameOverride = opt.NodeName f.MinimumGCAge = metav1.Duration{Duration: 1 * time.Minute} f.MaxContainerCount = 100 f.MaxPerPodContainerCount = 2 f.RegisterNode = true f.RegisterSchedulable = true - f.ProviderID = fmt.Sprintf("kubemark://%v", nodeName) + f.ProviderID = fmt.Sprintf("kubemark://%v", opt.NodeName) // Config struct c, err := options.NewKubeletConfiguration() @@ -167,17 +171,17 @@ func GetHollowKubeletConfig( c.StaticPodURL = "" c.Address = "0.0.0.0" /* bind address */ - c.Port = int32(kubeletPort) - c.ReadOnlyPort = int32(kubeletReadOnlyPort) + c.Port = int32(opt.KubeletPort) + c.ReadOnlyPort = int32(opt.KubeletReadOnlyPort) c.StaticPodPath = podFilePath c.FileCheckFrequency.Duration = 20 * time.Second c.HTTPCheckFrequency.Duration = 20 * time.Second c.NodeStatusUpdateFrequency.Duration = 10 * time.Second - c.NodeStatusReportFrequency.Duration = time.Minute + c.NodeStatusReportFrequency.Duration = 5 * time.Minute c.SyncFrequency.Duration = 10 * time.Second c.EvictionPressureTransitionPeriod.Duration = 5 * time.Minute - c.MaxPods = int32(maxPods) - c.PodsPerCore = int32(podsPerCore) + c.MaxPods = int32(opt.MaxPods) + c.PodsPerCore = int32(opt.PodsPerCore) c.ClusterDNS = []string{} c.ImageGCHighThresholdPercent = 90 c.ImageGCLowThresholdPercent = 80 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go index 11c9fe2889f..448a86bb9d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go @@ -18,6 +18,7 @@ package kubemark import ( "fmt" + "net" "time" v1 "k8s.io/api/core/v1" @@ -44,6 +45,7 @@ type HollowProxy struct { type FakeProxier struct { proxyconfig.NoopEndpointSliceHandler + proxyconfig.NoopNodeHandler } func (*FakeProxier) Sync() {} @@ -77,6 +79,11 @@ func NewHollowProxyOrDie( var err error if useRealProxier { + nodeIP := utilnode.GetNodeIP(client, nodeName) + if nodeIP == nil { + klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1") + nodeIP = net.ParseIP("127.0.0.1") + } // Real proxier with fake iptables, sysctl, etc underneath it. 
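The hollow proxy change just above stops handing a possibly-nil node IP to the iptables proxier: when GetNodeIP cannot determine the node's address it logs and falls back to 127.0.0.1. The guard reduces to a few lines, shown here standalone with an illustrative helper name:

package main

import (
	"fmt"
	"net"
)

// nodeIPOrLoopback returns ip unchanged when it is known, and the IPv4
// loopback address otherwise, mirroring the fallback added to NewHollowProxyOrDie.
func nodeIPOrLoopback(ip net.IP) net.IP {
	if ip == nil {
		// Same default the patch uses when the node's IP cannot be determined.
		return net.ParseIP("127.0.0.1")
	}
	return ip
}

func main() {
	fmt.Println(nodeIPOrLoopback(nil))                     // 127.0.0.1
	fmt.Println(nodeIPOrLoopback(net.ParseIP("10.0.0.7"))) // 10.0.0.7
}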
//var err error proxier, err = iptables.NewProxier( @@ -89,7 +96,7 @@ func NewHollowProxyOrDie( 0, "10.0.0.0/8", nodeName, - utilnode.GetNodeIP(client, nodeName), + nodeIP, recorder, nil, []string{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD index 317dc0fc0cd..a22faf032c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD @@ -11,6 +11,7 @@ go_library( srcs = ["exec.go"], importpath = "k8s.io/kubernetes/pkg/probe/exec", deps = [ + "//pkg/kubelet/util/ioutils:go_default_library", "//pkg/probe:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go index a6ae523aa62..b8cfe0d2ad7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go @@ -17,10 +17,17 @@ limitations under the License. package exec import ( + "bytes" + + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/probe" - "k8s.io/utils/exec" "k8s.io/klog" + "k8s.io/utils/exec" +) + +const ( + maxReadLength = 10 * 1 << 10 // 10KB ) // New creates a Prober. @@ -39,7 +46,17 @@ type execProber struct{} // from executing a command. Returns the Result status, command output, and // errors if any. func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) { - data, err := e.CombinedOutput() + var dataBuffer bytes.Buffer + writer := ioutils.LimitWriter(&dataBuffer, maxReadLength) + + e.SetStderr(writer) + e.SetStdout(writer) + err := e.Start() + if err == nil { + err = e.Wait() + } + data := dataBuffer.Bytes() + klog.V(4).Infof("Exec probe response: %q", string(data)) if err != nil { exit, ok := err.(exec.ExitError) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD index b92ac48587d..3541c2569d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/probe/http", deps = [ "//pkg/probe:go_default_library", - "//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/io:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go index 8dc8bb46bbd..84aae213195 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go @@ -25,8 +25,8 @@ import ( "time" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/component-base/version" "k8s.io/kubernetes/pkg/probe" - "k8s.io/kubernetes/pkg/version" "k8s.io/klog" utilio "k8s.io/utils/io" @@ -113,7 +113,11 @@ func DoHTTPProbe(url *url.URL, headers http.Header, client GetHTTPInterface) (pr defer res.Body.Close() b, err := utilio.ReadAtMost(res.Body, maxRespBodyLength) if err != nil { - return probe.Failure, "", err + if err == utilio.ErrLimitReached { + 
klog.V(4).Infof("Non fatal body truncation for %s, Response: %v", url.String(), *res) + } else { + return probe.Failure, "", err + } } body := string(b) if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD index 761d0aec8b9..0057cdf8fc1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD @@ -13,6 +13,7 @@ go_library( "endpoints.go", "endpointslicecache.go", "service.go", + "topology.go", "types.go", ], importpath = "k8s.io/kubernetes/pkg/proxy", @@ -22,7 +23,7 @@ go_library( "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", @@ -62,11 +63,12 @@ go_test( "endpoints_test.go", "endpointslicecache_test.go", "service_test.go", + "topology_test.go", ], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go index e2f408bd8ac..75e8082b25e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go @@ -29,7 +29,7 @@ var ( Scheme = runtime.NewScheme() // Codecs provides methods for retrieving codecs and serializers for specific // versions and content types. 
- Codecs = serializer.NewCodecFactory(Scheme) + Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict) ) func init() { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go index 991a9096b6e..b1314fc2e88 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go @@ -84,7 +84,7 @@ type KubeProxyWinkernelConfiguration struct { // networkName is the name of the network kube-proxy will use // to create endpoints and policies NetworkName string - // sourceVip is the IP address of the source VIP endoint used for + // sourceVip is the IP address of the source VIP endpoint used for // NAT when loadbalancing SourceVip string // enableDSR tells kube-proxy whether HNS policies should be created diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD index 92881120869..b8223cbf40a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD @@ -15,10 +15,10 @@ go_library( importpath = "k8s.io/kubernetes/pkg/proxy/config", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/informers/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go index 76947e73327..387ca05e971 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go @@ -21,10 +21,10 @@ import ( "time" "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" - discoveryinformers "k8s.io/client-go/informers/discovery/v1alpha1" + discoveryinformers "k8s.io/client-go/informers/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/klog" ) @@ -369,3 +369,128 @@ func (c *ServiceConfig) handleDeleteService(obj interface{}) { c.eventHandlers[i].OnServiceDelete(service) } } + +// NodeHandler is an abstract interface of objects which receive +// notifications about node object changes. +type NodeHandler interface { + // OnNodeAdd is called whenever creation of new node object + // is observed. + OnNodeAdd(node *v1.Node) + // OnNodeUpdate is called whenever modification of an existing + // node object is observed. + OnNodeUpdate(oldNode, node *v1.Node) + // OnNodeDelete is called whever deletion of an existing node + // object is observed. + OnNodeDelete(node *v1.Node) + // OnNodeSynced is called once all the initial event handlers were + // called and the state is fully propagated to local cache. 
+ OnNodeSynced() +} + +// NoopNodeHandler is a noop handler for proxiers that have not yet +// implemented a full NodeHandler. +type NoopNodeHandler struct{} + +// OnNodeAdd is a noop handler for Node creates. +func (*NoopNodeHandler) OnNodeAdd(node *v1.Node) {} + +// OnNodeUpdate is a noop handler for Node updates. +func (*NoopNodeHandler) OnNodeUpdate(oldNode, node *v1.Node) {} + +// OnNodeDelete is a noop handler for Node deletes. +func (*NoopNodeHandler) OnNodeDelete(node *v1.Node) {} + +// OnNodeSynced is a noop handler for Node syncs. +func (*NoopNodeHandler) OnNodeSynced() {} + +// NodeConfig tracks a set of node configurations. +// It accepts "set", "add" and "remove" operations of node via channels, and invokes registered handlers on change. +type NodeConfig struct { + listerSynced cache.InformerSynced + eventHandlers []NodeHandler +} + +// NewNodeConfig creates a new NodeConfig. +func NewNodeConfig(nodeInformer coreinformers.NodeInformer, resyncPeriod time.Duration) *NodeConfig { + result := &NodeConfig{ + listerSynced: nodeInformer.Informer().HasSynced, + } + + nodeInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: result.handleAddNode, + UpdateFunc: result.handleUpdateNode, + DeleteFunc: result.handleDeleteNode, + }, + resyncPeriod, + ) + + return result +} + +// RegisterEventHandler registers a handler which is called on every node change. +func (c *NodeConfig) RegisterEventHandler(handler NodeHandler) { + c.eventHandlers = append(c.eventHandlers, handler) +} + +// Run starts the goroutine responsible for calling registered handlers. +func (c *NodeConfig) Run(stopCh <-chan struct{}) { + klog.Info("Starting node config controller") + + if !cache.WaitForNamedCacheSync("node config", stopCh, c.listerSynced) { + return + } + + for i := range c.eventHandlers { + klog.V(3).Infof("Calling handler.OnNodeSynced()") + c.eventHandlers[i].OnNodeSynced() + } +} + +func (c *NodeConfig) handleAddNode(obj interface{}) { + node, ok := obj.(*v1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) + return + } + for i := range c.eventHandlers { + klog.V(4).Infof("Calling handler.OnNodeAdd") + c.eventHandlers[i].OnNodeAdd(node) + } +} + +func (c *NodeConfig) handleUpdateNode(oldObj, newObj interface{}) { + oldNode, ok := oldObj.(*v1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj)) + return + } + node, ok := newObj.(*v1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj)) + return + } + for i := range c.eventHandlers { + klog.V(5).Infof("Calling handler.OnNodeUpdate") + c.eventHandlers[i].OnNodeUpdate(oldNode, node) + } +} + +func (c *NodeConfig) handleDeleteNode(obj interface{}) { + node, ok := obj.(*v1.Node) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) + return + } + if node, ok = tombstone.Obj.(*v1.Node); !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) + return + } + } + for i := range c.eventHandlers { + klog.V(4).Infof("Calling handler.OnNodeDelete") + c.eventHandlers[i].OnNodeDelete(node) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go index 9bf1bdd4a25..35c2fc58893 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go +++ 
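
NodeConfig follows the same informer-fed fan-out pattern as the existing service and endpoint-slice configs: handlers register, Run waits for the cache sync, and every add/update/delete is forwarded to each NodeHandler. A hedged wiring sketch; the fake clientset, the labelLogger handler, and the resync period are illustrative, not part of this patch.

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
)

// labelLogger is an illustrative NodeHandler: it embeds NoopNodeHandler so it
// only has to implement the callback it actually cares about.
type labelLogger struct {
	proxyconfig.NoopNodeHandler
}

func (labelLogger) OnNodeUpdate(oldNode, node *v1.Node) {
	fmt.Printf("node %s labels changed: %v -> %v\n", node.Name, oldNode.Labels, node.Labels)
}

func main() {
	stopCh := make(chan struct{})
	defer close(stopCh)

	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)

	// Same wiring as kube-proxy: build a NodeConfig from the shared node
	// informer, register handlers, start the informers, then Run.
	nodeConfig := proxyconfig.NewNodeConfig(factory.Core().V1().Nodes(), 30*time.Second)
	nodeConfig.RegisterEventHandler(&labelLogger{})

	factory.Start(stopCh)
	go nodeConfig.Run(stopCh)

	time.Sleep(time.Second) // give the sync a moment in this toy example
}
```
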
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go @@ -26,7 +26,7 @@ import ( "k8s.io/klog" v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" @@ -35,6 +35,12 @@ import ( utilnet "k8s.io/utils/net" ) +var supportedEndpointSliceAddressTypes = sets.NewString( + string(discovery.AddressTypeIP), // IP is a deprecated address type + string(discovery.AddressTypeIPv4), + string(discovery.AddressTypeIPv6), +) + // BaseEndpointInfo contains base information that defines an endpoint. // This could be used directly by proxier while processing endpoints, // or can be used for constructing a more specific EndpointInfo struct @@ -42,7 +48,8 @@ import ( type BaseEndpointInfo struct { Endpoint string // TODO: should be an endpointString type // IsLocal indicates whether the endpoint is running in same host as kube-proxy. - IsLocal bool + IsLocal bool + Topology map[string]string } var _ Endpoint = &BaseEndpointInfo{} @@ -57,6 +64,11 @@ func (info *BaseEndpointInfo) GetIsLocal() bool { return info.IsLocal } +// GetTopology returns the topology information of the endpoint. +func (info *BaseEndpointInfo) GetTopology() map[string]string { + return info.Topology +} + // IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. func (info *BaseEndpointInfo) IP() string { return utilproxy.IPPart(info.Endpoint) @@ -72,10 +84,11 @@ func (info *BaseEndpointInfo) Equal(other Endpoint) bool { return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal() } -func newBaseEndpointInfo(IP string, port int, isLocal bool) *BaseEndpointInfo { +func newBaseEndpointInfo(IP string, port int, isLocal bool, topology map[string]string) *BaseEndpointInfo { return &BaseEndpointInfo{ Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)), IsLocal: isLocal, + Topology: topology, } } @@ -92,7 +105,7 @@ type EndpointChangeTracker struct { items map[types.NamespacedName]*endpointsChange // makeEndpointInfo allows proxier to inject customized information when processing endpoint. makeEndpointInfo makeEndpointFunc - // endpointSliceCache holds a simplified version of endpoint slices + // endpointSliceCache holds a simplified version of endpoint slices. endpointSliceCache *EndpointSliceCache // isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable. isIPv6Mode *bool @@ -173,52 +186,76 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool { // It returns true if items changed, otherwise return false. Will add/update/delete items of EndpointsChangeMap. // If removeSlice is true, slice will be removed, otherwise it will be added or updated. 
func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.EndpointSlice, removeSlice bool) bool { + if !supportedEndpointSliceAddressTypes.Has(string(endpointSlice.AddressType)) { + klog.V(4).Infof("EndpointSlice address type not supported by kube-proxy: %s", endpointSlice.AddressType) + return false + } + // This should never happen if endpointSlice == nil { klog.Error("Nil endpointSlice passed to EndpointSliceUpdate") return false } - namespacedName, _ := endpointSliceCacheKeys(endpointSlice) + namespacedName, _, err := endpointSliceCacheKeys(endpointSlice) + if err != nil { + klog.Warningf("Error getting endpoint slice cache keys: %v", err) + return false + } metrics.EndpointChangesTotal.Inc() ect.lock.Lock() defer ect.lock.Unlock() - change, ok := ect.items[namespacedName] - if !ok { - change = &endpointsChange{} - change.previous = ect.endpointSliceCache.EndpointsMap(namespacedName) - ect.items[namespacedName] = change - } + changeNeeded := ect.endpointSliceCache.updatePending(endpointSlice, removeSlice) - if removeSlice { - ect.endpointSliceCache.Delete(endpointSlice) - } else { - ect.endpointSliceCache.Update(endpointSlice) + if changeNeeded { + metrics.EndpointChangesPending.Inc() + if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() { + ect.lastChangeTriggerTimes[namespacedName] = + append(ect.lastChangeTriggerTimes[namespacedName], t) + } } - if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() { - ect.lastChangeTriggerTimes[namespacedName] = - append(ect.lastChangeTriggerTimes[namespacedName], t) + return changeNeeded +} + +// checkoutChanges returns a list of pending endpointsChanges and marks them as +// applied. +func (ect *EndpointChangeTracker) checkoutChanges() []*endpointsChange { + ect.lock.Lock() + defer ect.lock.Unlock() + + metrics.EndpointChangesPending.Set(0) + + if ect.endpointSliceCache != nil { + return ect.endpointSliceCache.checkoutChanges() } - change.current = ect.endpointSliceCache.EndpointsMap(namespacedName) - // if change.previous equal to change.current, it means no change - if reflect.DeepEqual(change.previous, change.current) { - delete(ect.items, namespacedName) - // Reset the lastChangeTriggerTimes for this service. Given that the network programming - // SLI is defined as the duration between a time of an event and a time when the network was - // programmed to incorporate that event, if there are events that happened between two - // consecutive syncs and that canceled each other out, e.g. pod A added -> pod A deleted, - // there will be no network programming for them and thus no network programming latency metric - // should be exported. - delete(ect.lastChangeTriggerTimes, namespacedName) + changes := []*endpointsChange{} + for _, change := range ect.items { + changes = append(changes, change) } + ect.items = make(map[types.NamespacedName]*endpointsChange) + return changes +} - metrics.EndpointChangesPending.Set(float64(len(ect.items))) - return len(ect.items) > 0 +// checkoutTriggerTimes applies the locally cached trigger times to a map of +// trigger times that have been passed in and empties the local cache. +func (ect *EndpointChangeTracker) checkoutTriggerTimes(lastChangeTriggerTimes *map[types.NamespacedName][]time.Time) { + ect.lock.Lock() + defer ect.lock.Unlock() + + for k, v := range ect.lastChangeTriggerTimes { + prev, ok := (*lastChangeTriggerTimes)[k] + if !ok { + (*lastChangeTriggerTimes)[k] = v + } else { + (*lastChangeTriggerTimes)[k] = append(prev, v...) 
+ } + } + ect.lastChangeTriggerTimes = make(map[types.NamespacedName][]time.Time) } // getLastChangeTriggerTime returns the time.Time value of the @@ -311,6 +348,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint svcPortName := ServicePortName{ NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: port.Name, + Protocol: port.Protocol, } for i := range ss.Addresses { addr := &ss.Addresses[i] @@ -323,11 +361,11 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint if ect.isIPv6Mode != nil && utilnet.IsIPv6String(addr.IP) != *ect.isIPv6Mode { // Emit event on the corresponding service which had a different // IP version than the endpoint. - utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Name, endpoints.Namespace, "") + utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Namespace, endpoints.Name, "") continue } isLocal := addr.NodeName != nil && *addr.NodeName == ect.hostname - baseEndpointInfo := newBaseEndpointInfo(addr.IP, int(port.Port), isLocal) + baseEndpointInfo := newBaseEndpointInfo(addr.IP, int(port.Port), isLocal, nil) if ect.makeEndpointInfo != nil { endpointsMap[svcPortName] = append(endpointsMap[svcPortName], ect.makeEndpointInfo(baseEndpointInfo)) } else { @@ -346,29 +384,19 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint // The changes map is cleared after applying them. // In addition it returns (via argument) and resets the lastChangeTriggerTimes for all endpoints // that were changed and will result in syncing the proxy rules. -func (em EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, +func (em EndpointsMap) apply(ect *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName, lastChangeTriggerTimes *map[types.NamespacedName][]time.Time) { - if changes == nil { + if ect == nil { return } - changes.lock.Lock() - defer changes.lock.Unlock() - for _, change := range changes.items { + + changes := ect.checkoutChanges() + for _, change := range changes { em.unmerge(change.previous) em.merge(change.current) detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames) } - changes.items = make(map[types.NamespacedName]*endpointsChange) - metrics.EndpointChangesPending.Set(0) - for k, v := range changes.lastChangeTriggerTimes { - prev, ok := (*lastChangeTriggerTimes)[k] - if !ok { - (*lastChangeTriggerTimes)[k] = v - } else { - (*lastChangeTriggerTimes)[k] = append(prev, v...) - } - } - changes.lastChangeTriggerTimes = make(map[types.NamespacedName][]time.Time) + ect.checkoutTriggerTimes(lastChangeTriggerTimes) } // Merge ensures that the current EndpointsMap contains all pairs from the EndpointsMap passed in. @@ -406,6 +434,10 @@ func (em EndpointsMap) getLocalEndpointIPs() map[types.NamespacedName]sets.Strin // is used to store stale udp service in order to clear udp conntrack later. 
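
checkoutTriggerTimes merges the tracker's per-service trigger times into the caller's map and then empties the local cache, so each observed change feeds the network-programming-latency metric exactly once. A reduced sketch of that merge-and-drain step with plain string keys instead of types.NamespacedName.

```go
package main

import (
	"fmt"
	"time"
)

// checkoutTriggerTimes drains src into dst: existing keys get appended to,
// new keys are copied over, and src is reset so the same trigger times are
// never exported twice.
func checkoutTriggerTimes(dst, src map[string][]time.Time) {
	for svc, times := range src {
		dst[svc] = append(dst[svc], times...)
		delete(src, svc)
	}
}

func main() {
	now := time.Now()
	pending := map[string][]time.Time{
		"default/nginx": {now.Add(-2 * time.Second), now.Add(-1 * time.Second)},
	}
	collected := map[string][]time.Time{
		"default/nginx": {now.Add(-5 * time.Second)},
	}

	checkoutTriggerTimes(collected, pending)
	fmt.Println(len(collected["default/nginx"]), "trigger times queued for the latency metric")
	fmt.Println(len(pending), "entries left in the tracker cache")
}
```
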
func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) { for svcPortName, epList := range oldEndpointsMap { + if svcPortName.Protocol != v1.ProtocolUDP { + continue + } + for _, ep := range epList { stale := true for i := range newEndpointsMap[svcPortName] { @@ -422,6 +454,10 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, stale } for svcPortName, epList := range newEndpointsMap { + if svcPortName.Protocol != v1.ProtocolUDP { + continue + } + // For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service. if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 { *staleServiceNames = append(*staleServiceNames, svcPortName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go index 229b7744424..e9c6165353b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go @@ -17,10 +17,14 @@ limitations under the License. package proxy import ( + "fmt" + "reflect" "sort" + "strings" + "sync" "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog" @@ -30,24 +34,41 @@ import ( // EndpointSliceCache is used as a cache of EndpointSlice information. type EndpointSliceCache struct { - // sliceByServiceMap is the basis of this cache. It contains endpoint slice - // info grouped by service name and endpoint slice name. The first key - // represents a namespaced service name while the second key represents + // lock protects trackerByServiceMap. + lock sync.Mutex + + // trackerByServiceMap is the basis of this cache. It contains endpoint + // slice trackers grouped by service name and endpoint slice name. The first + // key represents a namespaced service name while the second key represents // an endpoint slice name. Since endpoints can move between slices, we // require slice specific caching to prevent endpoints being removed from // the cache when they may have just moved to a different slice. - sliceByServiceMap map[types.NamespacedName]map[string]*endpointSliceInfo - makeEndpointInfo makeEndpointFunc - hostname string - isIPv6Mode *bool - recorder record.EventRecorder + trackerByServiceMap map[types.NamespacedName]*endpointSliceTracker + + makeEndpointInfo makeEndpointFunc + hostname string + isIPv6Mode *bool + recorder record.EventRecorder } +// endpointSliceTracker keeps track of EndpointSlices as they have been applied +// by a proxier along with any pending EndpointSlices that have been updated +// in this cache but not yet applied by a proxier. +type endpointSliceTracker struct { + applied endpointSliceInfoByName + pending endpointSliceInfoByName +} + +// endpointSliceInfoByName groups endpointSliceInfo by the names of the +// corresponding EndpointSlices. +type endpointSliceInfoByName map[string]*endpointSliceInfo + // endpointSliceInfo contains just the attributes kube-proxy cares about. // Used for caching. Intentionally small to limit memory util. type endpointSliceInfo struct { Ports []discovery.EndpointPort Endpoints []*endpointInfo + Remove bool } // endpointInfo contains just the attributes kube-proxy cares about. 
@@ -58,68 +79,134 @@ type endpointInfo struct { Topology map[string]string } +// spToEndpointMap stores groups Endpoint objects by ServicePortName and +// EndpointSlice name. +type spToEndpointMap map[ServicePortName]map[string]Endpoint + // NewEndpointSliceCache initializes an EndpointSliceCache. func NewEndpointSliceCache(hostname string, isIPv6Mode *bool, recorder record.EventRecorder, makeEndpointInfo makeEndpointFunc) *EndpointSliceCache { if makeEndpointInfo == nil { makeEndpointInfo = standardEndpointInfo } return &EndpointSliceCache{ - sliceByServiceMap: map[types.NamespacedName]map[string]*endpointSliceInfo{}, - hostname: hostname, - isIPv6Mode: isIPv6Mode, - makeEndpointInfo: makeEndpointInfo, - recorder: recorder, + trackerByServiceMap: map[types.NamespacedName]*endpointSliceTracker{}, + hostname: hostname, + isIPv6Mode: isIPv6Mode, + makeEndpointInfo: makeEndpointInfo, + recorder: recorder, } } -// standardEndpointInfo is the default makeEndpointFunc. -func standardEndpointInfo(ep *BaseEndpointInfo) Endpoint { - return ep +// newEndpointSliceTracker initializes an endpointSliceTracker. +func newEndpointSliceTracker() *endpointSliceTracker { + return &endpointSliceTracker{ + applied: endpointSliceInfoByName{}, + pending: endpointSliceInfoByName{}, + } } -// Update a slice in the cache. -func (cache *EndpointSliceCache) Update(endpointSlice *discovery.EndpointSlice) { - serviceKey, sliceKey := endpointSliceCacheKeys(endpointSlice) - // This should never actually happen - if serviceKey.Name == "" || serviceKey.Namespace == "" || sliceKey == "" { - klog.Errorf("Invalid endpoint slice, name and owner reference required %v", endpointSlice) - return - } +// newEndpointSliceInfo generates endpointSliceInfo from an EndpointSlice. +func newEndpointSliceInfo(endpointSlice *discovery.EndpointSlice, remove bool) *endpointSliceInfo { esInfo := &endpointSliceInfo{ Ports: endpointSlice.Ports, Endpoints: []*endpointInfo{}, + Remove: remove, } - for _, endpoint := range endpointSlice.Endpoints { - if endpoint.Conditions.Ready == nil || *endpoint.Conditions.Ready == true { - esInfo.Endpoints = append(esInfo.Endpoints, &endpointInfo{ - Addresses: endpoint.Addresses, - Topology: endpoint.Topology, - }) + + sort.Sort(byPort(esInfo.Ports)) + + if !remove { + for _, endpoint := range endpointSlice.Endpoints { + if endpoint.Conditions.Ready == nil || *endpoint.Conditions.Ready { + esInfo.Endpoints = append(esInfo.Endpoints, &endpointInfo{ + Addresses: endpoint.Addresses, + Topology: endpoint.Topology, + }) + } } + + sort.Sort(byAddress(esInfo.Endpoints)) + } + + return esInfo +} + +// standardEndpointInfo is the default makeEndpointFunc. +func standardEndpointInfo(ep *BaseEndpointInfo) Endpoint { + return ep +} + +// updatePending updates a pending slice in the cache. 
+func (cache *EndpointSliceCache) updatePending(endpointSlice *discovery.EndpointSlice, remove bool) bool { + serviceKey, sliceKey, err := endpointSliceCacheKeys(endpointSlice) + if err != nil { + klog.Warningf("Error getting endpoint slice cache keys: %v", err) + return false } - if _, exists := cache.sliceByServiceMap[serviceKey]; !exists { - cache.sliceByServiceMap[serviceKey] = map[string]*endpointSliceInfo{} + + esInfo := newEndpointSliceInfo(endpointSlice, remove) + + cache.lock.Lock() + defer cache.lock.Unlock() + + if _, ok := cache.trackerByServiceMap[serviceKey]; !ok { + cache.trackerByServiceMap[serviceKey] = newEndpointSliceTracker() + } + + changed := cache.esInfoChanged(serviceKey, sliceKey, esInfo) + + if changed { + cache.trackerByServiceMap[serviceKey].pending[sliceKey] = esInfo } - cache.sliceByServiceMap[serviceKey][sliceKey] = esInfo + + return changed } -// Delete a slice from the cache. -func (cache *EndpointSliceCache) Delete(endpointSlice *discovery.EndpointSlice) { - serviceKey, sliceKey := endpointSliceCacheKeys(endpointSlice) - delete(cache.sliceByServiceMap[serviceKey], sliceKey) +// checkoutChanges returns a list of all endpointsChanges that are +// pending and then marks them as applied. +func (cache *EndpointSliceCache) checkoutChanges() []*endpointsChange { + changes := []*endpointsChange{} + + cache.lock.Lock() + defer cache.lock.Unlock() + + for serviceNN, esTracker := range cache.trackerByServiceMap { + if len(esTracker.pending) == 0 { + continue + } + + change := &endpointsChange{} + + change.previous = cache.getEndpointsMap(serviceNN, esTracker.applied) + + for name, sliceInfo := range esTracker.pending { + if sliceInfo.Remove { + delete(esTracker.applied, name) + } else { + esTracker.applied[name] = sliceInfo + } + + delete(esTracker.pending, name) + } + + change.current = cache.getEndpointsMap(serviceNN, esTracker.applied) + changes = append(changes, change) + } + + return changes } -// EndpointsMap computes an EndpointsMap for a given service. -func (cache *EndpointSliceCache) EndpointsMap(serviceNN types.NamespacedName) EndpointsMap { - endpointInfoBySP := cache.endpointInfoByServicePort(serviceNN) +// getEndpointsMap computes an EndpointsMap for a given set of EndpointSlices. +func (cache *EndpointSliceCache) getEndpointsMap(serviceNN types.NamespacedName, sliceInfoByName endpointSliceInfoByName) EndpointsMap { + endpointInfoBySP := cache.endpointInfoByServicePort(serviceNN, sliceInfoByName) return endpointsMapFromEndpointInfo(endpointInfoBySP) } // endpointInfoByServicePort groups endpoint info by service port name and address. 
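
Taken together, updatePending and checkoutChanges give the cache a two-stage life cycle: updates are staged only when they differ from what is pending or already applied, and a later checkout promotes pending entries while handing the proxier a previous/current pair to diff. A reduced, self-contained sketch of that bookkeeping (removal handling omitted, plain strings in place of endpointSliceInfo).

```go
package main

import "fmt"

// change mirrors the previous/current snapshot pair the proxier consumes.
type change struct {
	previous, current map[string]string // slice name -> simplified content
}

// tracker is a reduced stand-in for endpointSliceTracker: one map of state a
// proxier has already applied, one map of pending updates.
type tracker struct {
	applied map[string]string
	pending map[string]string
}

func newTracker() *tracker {
	return &tracker{applied: map[string]string{}, pending: map[string]string{}}
}

// updatePending stages content only if it differs from the pending entry
// (checked first) or, otherwise, the applied entry, so repeated identical
// updates never wake the sync loop.
func (t *tracker) updatePending(slice, content string) bool {
	if v, ok := t.pending[slice]; ok {
		if v == content {
			return false
		}
	} else if v, ok := t.applied[slice]; ok && v == content {
		return false
	}
	t.pending[slice] = content
	return true
}

// checkoutChanges promotes everything pending to applied and returns the
// before/after snapshots the caller diffs its rules against.
func (t *tracker) checkoutChanges() change {
	prev := copyMap(t.applied)
	for name, content := range t.pending {
		t.applied[name] = content
		delete(t.pending, name)
	}
	return change{previous: prev, current: copyMap(t.applied)}
}

func copyMap(m map[string]string) map[string]string {
	out := make(map[string]string, len(m))
	for k, v := range m {
		out[k] = v
	}
	return out
}

func main() {
	t := newTracker()
	fmt.Println(t.updatePending("slice-a", "10.0.0.1:80")) // true: new slice
	fmt.Println(t.updatePending("slice-a", "10.0.0.1:80")) // false: no change
	c := t.checkoutChanges()
	fmt.Println(len(c.previous), len(c.current))           // 0 1
	fmt.Println(t.updatePending("slice-a", "10.0.0.1:80")) // false: already applied
}
```
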
-func (cache *EndpointSliceCache) endpointInfoByServicePort(serviceNN types.NamespacedName) map[ServicePortName]map[string]Endpoint { - endpointInfoBySP := map[ServicePortName]map[string]Endpoint{} +func (cache *EndpointSliceCache) endpointInfoByServicePort(serviceNN types.NamespacedName, sliceInfoByName endpointSliceInfoByName) spToEndpointMap { + endpointInfoBySP := spToEndpointMap{} - for _, sliceInfo := range cache.sliceByServiceMap[serviceNN] { + for _, sliceInfo := range sliceInfoByName { for _, port := range sliceInfo.Ports { if port.Name == nil { klog.Warningf("ignoring port with nil name %v", port) @@ -131,8 +218,11 @@ func (cache *EndpointSliceCache) endpointInfoByServicePort(serviceNN types.Names continue } - svcPortName := ServicePortName{NamespacedName: serviceNN} - svcPortName.Port = *port.Name + svcPortName := ServicePortName{ + NamespacedName: serviceNN, + Port: *port.Name, + Protocol: *port.Protocol, + } endpointInfoBySP[svcPortName] = cache.addEndpointsByIP(serviceNN, int(*port.Port), endpointInfoBySP[svcPortName], sliceInfo.Endpoints) } @@ -159,12 +249,12 @@ func (cache *EndpointSliceCache) addEndpointsByIP(serviceNN types.NamespacedName if cache.isIPv6Mode != nil && utilnet.IsIPv6String(endpoint.Addresses[0]) != *cache.isIPv6Mode { // Emit event on the corresponding service which had a different IP // version than the endpoint. - utilproxy.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], serviceNN.Name, serviceNN.Namespace, "") + utilproxy.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], serviceNN.Namespace, serviceNN.Name, "") continue } isLocal := cache.isLocal(endpoint.Topology[v1.LabelHostname]) - endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal) + endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal, endpoint.Topology) // This logic ensures we're deduping potential overlapping endpoints // isLocal should not vary between matching IPs, but if it does, we @@ -181,6 +271,36 @@ func (cache *EndpointSliceCache) isLocal(hostname string) bool { return len(cache.hostname) > 0 && hostname == cache.hostname } +// esInfoChanged returns true if the esInfo parameter should be set as a new +// pending value in the cache. +func (cache *EndpointSliceCache) esInfoChanged(serviceKey types.NamespacedName, sliceKey string, esInfo *endpointSliceInfo) bool { + if _, ok := cache.trackerByServiceMap[serviceKey]; ok { + appliedInfo, appliedOk := cache.trackerByServiceMap[serviceKey].applied[sliceKey] + pendingInfo, pendingOk := cache.trackerByServiceMap[serviceKey].pending[sliceKey] + + // If there's already a pending value, return whether or not this would + // change that. + if pendingOk { + return !reflect.DeepEqual(esInfo, pendingInfo) + } + + // If there's already an applied value, return whether or not this would + // change that. + if appliedOk { + return !reflect.DeepEqual(esInfo, appliedInfo) + } + } + + // If this is marked for removal and does not exist in the cache, no changes + // are necessary. + if esInfo.Remove { + return false + } + + // If not in the cache, and not marked for removal, it should be added. + return true +} + // endpointsMapFromEndpointInfo computes an endpointsMap from endpointInfo that // has been grouped by service port and IP. 
func endpointsMapFromEndpointInfo(endpointInfoBySP map[ServicePortName]map[string]Endpoint) EndpointsMap { @@ -214,16 +334,28 @@ func formatEndpointsList(endpoints []Endpoint) []string { } // endpointSliceCacheKeys returns cache keys used for a given EndpointSlice. -func endpointSliceCacheKeys(endpointSlice *discovery.EndpointSlice) (types.NamespacedName, string) { - if len(endpointSlice.OwnerReferences) == 0 { - klog.Errorf("No owner reference set on endpoint slice: %s", endpointSlice.Name) - return types.NamespacedName{}, endpointSlice.Name - } - if len(endpointSlice.OwnerReferences) > 1 { - klog.Errorf("More than 1 owner reference set on endpoint slice: %s", endpointSlice.Name) +func endpointSliceCacheKeys(endpointSlice *discovery.EndpointSlice) (types.NamespacedName, string, error) { + var err error + serviceName, ok := endpointSlice.Labels[discovery.LabelServiceName] + if !ok || serviceName == "" { + err = fmt.Errorf("No %s label set on endpoint slice: %s", discovery.LabelServiceName, endpointSlice.Name) + } else if endpointSlice.Namespace == "" || endpointSlice.Name == "" { + err = fmt.Errorf("Expected EndpointSlice name and namespace to be set: %v", endpointSlice) } - ownerRef := endpointSlice.OwnerReferences[0] - return types.NamespacedName{Namespace: endpointSlice.Namespace, Name: ownerRef.Name}, endpointSlice.Name + return types.NamespacedName{Namespace: endpointSlice.Namespace, Name: serviceName}, endpointSlice.Name, err +} + +// byAddress helps sort endpointInfo +type byAddress []*endpointInfo + +func (e byAddress) Len() int { + return len(e) +} +func (e byAddress) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} +func (e byAddress) Less(i, j int) bool { + return strings.Join(e[i].Addresses, ",") < strings.Join(e[j].Addresses, ",") } // byIP helps sort endpoints by IP @@ -236,5 +368,18 @@ func (e byIP) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e byIP) Less(i, j int) bool { - return e[i].IP() < e[j].IP() + return e[i].String() < e[j].String() +} + +// byPort helps sort EndpointSlice ports by port number +type byPort []discovery.EndpointPort + +func (p byPort) Len() int { + return len(p) +} +func (p byPort) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} +func (p byPort) Less(i, j int) bool { + return *p[i].Port < *p[j].Port } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD index d136039b211..d99782630f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD @@ -9,8 +9,10 @@ load( go_library( name = "go_default_library", srcs = [ + "common.go", "doc.go", - "healthcheck.go", + "proxier_health.go", + "service_health.go", ], importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck", deps = [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go new file mode 100644 index 00000000000..f65cf1666e8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
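
newEndpointSliceInfo sorts ports (byPort) and endpoints (byAddress) before caching precisely so that the reflect.DeepEqual comparisons above are insensitive to the order the API server happens to return them in. A small sketch of the same normalization idea on a toy type.

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
)

type endpoint struct {
	Addresses []string
}

// normalize sorts endpoints by their joined address list, mirroring the
// byAddress ordering, so two logically identical slices compare equal.
func normalize(eps []endpoint) []endpoint {
	out := append([]endpoint(nil), eps...)
	sort.Slice(out, func(i, j int) bool {
		return strings.Join(out[i].Addresses, ",") < strings.Join(out[j].Addresses, ",")
	})
	return out
}

func main() {
	a := []endpoint{{Addresses: []string{"10.0.0.2"}}, {Addresses: []string{"10.0.0.1"}}}
	b := []endpoint{{Addresses: []string{"10.0.0.1"}}, {Addresses: []string{"10.0.0.2"}}}

	fmt.Println(reflect.DeepEqual(a, b))                       // false: order differs
	fmt.Println(reflect.DeepEqual(normalize(a), normalize(b))) // true: same content
}
```
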
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthcheck + +import ( + "net" + "net/http" +) + +// listener allows for testing of ServiceHealthServer and ProxierHealthServer. +type listener interface { + // Listen is very much like net.Listen, except the first arg (network) is + // fixed to be "tcp". + Listen(addr string) (net.Listener, error) +} + +// httpServerFactory allows for testing of ServiceHealthServer and ProxierHealthServer. +type httpServerFactory interface { + // New creates an instance of a type satisfying HTTPServer. This is + // designed to include http.Server. + New(addr string, handler http.Handler) httpServer +} + +// httpServer allows for testing of ServiceHealthServer and ProxierHealthServer. +// It is designed so that http.Server satisfies this interface, +type httpServer interface { + Serve(listener net.Listener) error +} + +// Implement listener in terms of net.Listen. +type stdNetListener struct{} + +func (stdNetListener) Listen(addr string) (net.Listener, error) { + return net.Listen("tcp", addr) +} + +var _ listener = stdNetListener{} + +// Implement httpServerFactory in terms of http.Server. +type stdHTTPServerFactory struct{} + +func (stdHTTPServerFactory) New(addr string, handler http.Handler) httpServer { + return &http.Server{ + Addr: addr, + Handler: handler, + } +} + +var _ httpServerFactory = stdHTTPServerFactory{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/proxier_health.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/proxier_health.go new file mode 100644 index 00000000000..44dc873fac3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/proxier_health.go @@ -0,0 +1,163 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthcheck + +import ( + "fmt" + "net/http" + "sync/atomic" + "time" + + "k8s.io/klog" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" + api "k8s.io/kubernetes/pkg/apis/core" +) + +var proxierHealthzRetryInterval = 60 * time.Second + +// ProxierHealthUpdater allows callers to update healthz timestamp only. +type ProxierHealthUpdater interface { + // QueuedUpdate should be called when the proxier receives a Service or Endpoints + // event containing information that requires updating service rules. + QueuedUpdate() + + // Updated should be called when the proxier has successfully updated the service + // rules to reflect the current state. + Updated() +} + +// ProxierHealthServer returns 200 "OK" by default. 
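
common.go factors the listener and HTTP-server seams shared by ServiceHealthServer and ProxierHealthServer into one unexported spot; the std implementations wrap net.Listen and http.Server, and tests can inject fakes so no fixed port is ever bound. A generic, stand-alone sketch of that seam pattern; the names here are illustrative rather than the package's own.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
)

// listener is the seam: production code uses the std implementation below,
// tests swap in a fake that records the address and binds an ephemeral port.
type listener interface {
	Listen(addr string) (net.Listener, error)
}

type stdNetListener struct{}

func (stdNetListener) Listen(addr string) (net.Listener, error) {
	return net.Listen("tcp", addr)
}

type recordingListener struct {
	requested []string
}

func (r *recordingListener) Listen(addr string) (net.Listener, error) {
	r.requested = append(r.requested, addr)
	// Bind an ephemeral localhost port instead of the requested address so
	// tests never collide on fixed ports.
	return net.Listen("tcp", "127.0.0.1:0")
}

var (
	_ listener = stdNetListener{}
	_ listener = (*recordingListener)(nil)
)

// serveHealthz starts an HTTP server on whatever listener it is given.
func serveHealthz(l listener, addr string, h http.Handler) (net.Listener, error) {
	nl, err := l.Listen(addr)
	if err != nil {
		return nil, err
	}
	go func() { _ = http.Serve(nl, h) }()
	return nl, nil
}

func main() {
	fake := &recordingListener{}
	nl, err := serveHealthz(fake, "0.0.0.0:10256", http.NotFoundHandler())
	if err != nil {
		panic(err)
	}
	defer nl.Close()
	fmt.Println("asked for:", fake.requested, "actually bound:", nl.Addr())
}
```
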
It verifies that the delay between +// QueuedUpdate() calls and Updated() calls never exceeds healthTimeout. +type ProxierHealthServer struct { + listener listener + httpFactory httpServerFactory + clock clock.Clock + + addr string + port int32 + healthTimeout time.Duration + recorder record.EventRecorder + nodeRef *v1.ObjectReference + + lastUpdated atomic.Value + lastQueued atomic.Value +} + +// NewProxierHealthServer returns a proxier health http server. +func NewProxierHealthServer(addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *ProxierHealthServer { + return newProxierHealthServer(stdNetListener{}, stdHTTPServerFactory{}, clock.RealClock{}, addr, healthTimeout, recorder, nodeRef) +} + +func newProxierHealthServer(listener listener, httpServerFactory httpServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *ProxierHealthServer { + return &ProxierHealthServer{ + listener: listener, + httpFactory: httpServerFactory, + clock: c, + addr: addr, + healthTimeout: healthTimeout, + recorder: recorder, + nodeRef: nodeRef, + } +} + +// Updated updates the lastUpdated timestamp. +func (hs *ProxierHealthServer) Updated() { + hs.lastUpdated.Store(hs.clock.Now()) +} + +// QueuedUpdate updates the lastQueued timestamp. +func (hs *ProxierHealthServer) QueuedUpdate() { + hs.lastQueued.Store(hs.clock.Now()) +} + +// Run starts the healthz http server and returns. +func (hs *ProxierHealthServer) Run() { + serveMux := http.NewServeMux() + serveMux.Handle("/healthz", healthzHandler{hs: hs}) + server := hs.httpFactory.New(hs.addr, serveMux) + + go wait.Until(func() { + klog.V(3).Infof("Starting goroutine for proxier healthz on %s", hs.addr) + + listener, err := hs.listener.Listen(hs.addr) + if err != nil { + msg := fmt.Sprintf("Failed to start proxier healthz on %s: %v", hs.addr, err) + if hs.recorder != nil { + hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartProxierHealthcheck", msg) + } + klog.Error(msg) + return + } + + if err := server.Serve(listener); err != nil { + klog.Errorf("Proxier healthz closed with error: %v", err) + return + } + klog.Error("Unexpected proxier healthz closed.") + }, proxierHealthzRetryInterval, wait.NeverStop) +} + +type healthzHandler struct { + hs *ProxierHealthServer +} + +func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + var lastQueued, lastUpdated time.Time + if val := h.hs.lastQueued.Load(); val != nil { + lastQueued = val.(time.Time) + } + if val := h.hs.lastUpdated.Load(); val != nil { + lastUpdated = val.(time.Time) + } + currentTime := h.hs.clock.Now() + + healthy := false + switch { + case lastUpdated.IsZero(): + // The proxy is healthy while it's starting up + // TODO: this makes it useless as a readinessProbe. Consider changing + // to only become healthy after the proxy is fully synced. 
+ healthy = true + case lastUpdated.After(lastQueued): + // We've processed all updates + healthy = true + case currentTime.Sub(lastQueued) < h.hs.healthTimeout: + // There's an unprocessed update queued, but it's not late yet + healthy = true + } + + resp.Header().Set("Content-Type", "application/json") + resp.Header().Set("X-Content-Type-Options", "nosniff") + if !healthy { + resp.WriteHeader(http.StatusServiceUnavailable) + } else { + resp.WriteHeader(http.StatusOK) + + // In older releases, the returned "lastUpdated" time indicated the last + // time the proxier sync loop ran, even if nothing had changed. To + // preserve compatibility, we use the same semantics: the returned + // lastUpdated value is "recent" if the server is healthy. The kube-proxy + // metrics provide more detailed information. + lastUpdated = currentTime + + } + fmt.Fprintf(resp, fmt.Sprintf(`{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go similarity index 51% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go index 348ca311f8d..0dcb61adbd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go @@ -22,27 +22,21 @@ import ( "net/http" "strings" "sync" - "sync/atomic" - "time" "github.com/lithammer/dedent" "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" api "k8s.io/kubernetes/pkg/apis/core" ) -var nodeHealthzRetryInterval = 60 * time.Second - -// Server serves HTTP endpoints for each service name, with results +// ServiceHealthServer serves HTTP endpoints for each service name, with results // based on the endpoints. If there are 0 endpoints for a service, it returns a // 503 "Service Unavailable" error (telling LBs not to use this node). If there // are 1 or more endpoints, it returns a 200 "OK". -type Server interface { +type ServiceHealthServer interface { // Make the new set of services be active. Services that were open before // will be closed. Services that are new will be opened. Service that // existed and are in the new set will be left alone. The value of the map @@ -54,73 +48,26 @@ type Server interface { SyncEndpoints(newEndpoints map[types.NamespacedName]int) error } -// Listener allows for testing of Server. If the Listener argument -// to NewServer() is nil, the real net.Listen function will be used. -type Listener interface { - // Listen is very much like net.Listen, except the first arg (network) is - // fixed to be "tcp". - Listen(addr string) (net.Listener, error) -} - -// HTTPServerFactory allows for testing of Server. If the -// HTTPServerFactory argument to NewServer() is nil, the real -// http.Server type will be used. -type HTTPServerFactory interface { - // New creates an instance of a type satisfying HTTPServer. This is - // designed to include http.Server. - New(addr string, handler http.Handler) HTTPServer -} - -// HTTPServer allows for testing of Server. 
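
With both lastQueued and lastUpdated tracked, the handler reports healthy while the proxier is starting up, once every queued update has been applied, or while a queued update is still younger than healthTimeout. The decision is easiest to see as a pure function; the signature below is illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// proxierHealthy reproduces the three healthy cases of the /healthz handler:
// nothing applied yet (startup), all queued work applied, or a queued update
// that is still younger than healthTimeout.
func proxierHealthy(lastQueued, lastUpdated, now time.Time, healthTimeout time.Duration) bool {
	switch {
	case lastUpdated.IsZero():
		return true // still starting up
	case lastUpdated.After(lastQueued):
		return true // all queued work has been applied
	case now.Sub(lastQueued) < healthTimeout:
		return true // an update is pending but not late yet
	}
	return false // a queued update has been outstanding too long
}

func main() {
	now := time.Now()
	timeout := 2 * time.Minute

	fmt.Println(proxierHealthy(time.Time{}, time.Time{}, now, timeout))                          // true: startup
	fmt.Println(proxierHealthy(now.Add(-time.Minute), now, now, timeout))                        // true: caught up
	fmt.Println(proxierHealthy(now.Add(-time.Minute), now.Add(-3*time.Minute), now, timeout))    // true: pending, not late
	fmt.Println(proxierHealthy(now.Add(-5*time.Minute), now.Add(-10*time.Minute), now, timeout)) // false: late
}
```
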
-type HTTPServer interface { - // Server is designed so that http.Server satisfies this interface, - Serve(listener net.Listener) error -} - -// NewServer allocates a new healthcheck server manager. If either -// of the injected arguments are nil, defaults will be used. -func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server { - if listener == nil { - listener = stdNetListener{} - } - if httpServerFactory == nil { - httpServerFactory = stdHTTPServerFactory{} - } +func newServiceHealthServer(hostname string, recorder record.EventRecorder, listener listener, factory httpServerFactory) ServiceHealthServer { return &server{ hostname: hostname, recorder: recorder, listener: listener, - httpFactory: httpServerFactory, + httpFactory: factory, services: map[types.NamespacedName]*hcInstance{}, } } -// Implement Listener in terms of net.Listen. -type stdNetListener struct{} - -func (stdNetListener) Listen(addr string) (net.Listener, error) { - return net.Listen("tcp", addr) -} - -var _ Listener = stdNetListener{} - -// Implement HTTPServerFactory in terms of http.Server. -type stdHTTPServerFactory struct{} - -func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer { - return &http.Server{ - Addr: addr, - Handler: handler, - } +// NewServiceHealthServer allocates a new service healthcheck server manager +func NewServiceHealthServer(hostname string, recorder record.EventRecorder) ServiceHealthServer { + return newServiceHealthServer(hostname, recorder, stdNetListener{}, stdHTTPServerFactory{}) } -var _ HTTPServerFactory = stdHTTPServerFactory{} - type server struct { hostname string recorder record.EventRecorder // can be nil - listener Listener - httpFactory HTTPServerFactory + listener listener + httpFactory httpServerFactory lock sync.RWMutex services map[types.NamespacedName]*hcInstance @@ -187,7 +134,7 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err type hcInstance struct { port uint16 listener net.Listener - server HTTPServer + server httpServer endpoints int // number of local endpoints for a service } @@ -210,6 +157,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { h.hcs.lock.RUnlock() resp.Header().Set("Content-Type", "application/json") + resp.Header().Set("X-Content-Type-Options", "nosniff") if count == 0 { resp.WriteHeader(http.StatusServiceUnavailable) } else { @@ -246,102 +194,20 @@ func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) erro return nil } -// HealthzUpdater allows callers to update healthz timestamp only. -type HealthzUpdater interface { - UpdateTimestamp() -} - -// HealthzServer returns 200 "OK" by default. Once timestamp has been -// updated, it verifies we don't exceed max no respond duration since -// last update. -type HealthzServer struct { - listener Listener - httpFactory HTTPServerFactory - clock clock.Clock - - addr string - port int32 - healthTimeout time.Duration - recorder record.EventRecorder - nodeRef *v1.ObjectReference - - lastUpdated atomic.Value -} - -// NewDefaultHealthzServer returns a default healthz http server. 
-func NewDefaultHealthzServer(addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer { - return newHealthzServer(nil, nil, nil, addr, healthTimeout, recorder, nodeRef) -} - -func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer { - if listener == nil { - listener = stdNetListener{} - } - if httpServerFactory == nil { - httpServerFactory = stdHTTPServerFactory{} - } - if c == nil { - c = clock.RealClock{} - } - return &HealthzServer{ - listener: listener, - httpFactory: httpServerFactory, - clock: c, - addr: addr, - healthTimeout: healthTimeout, - recorder: recorder, - nodeRef: nodeRef, - } -} - -// UpdateTimestamp updates the lastUpdated timestamp. -func (hs *HealthzServer) UpdateTimestamp() { - hs.lastUpdated.Store(hs.clock.Now()) -} - -// Run starts the healthz http server and returns. -func (hs *HealthzServer) Run() { - serveMux := http.NewServeMux() - serveMux.Handle("/healthz", healthzHandler{hs: hs}) - server := hs.httpFactory.New(hs.addr, serveMux) - - go wait.Until(func() { - klog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr) - - listener, err := hs.listener.Listen(hs.addr) - if err != nil { - msg := fmt.Sprintf("Failed to start node healthz on %s: %v", hs.addr, err) - if hs.recorder != nil { - hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg) - } - klog.Error(msg) - return - } +// FakeServiceHealthServer is a fake ServiceHealthServer for test programs +type FakeServiceHealthServer struct{} - if err := server.Serve(listener); err != nil { - klog.Errorf("Healthz closed with error: %v", err) - return - } - klog.Error("Unexpected healthz closed.") - }, nodeHealthzRetryInterval, wait.NeverStop) +// NewFakeServiceHealthServer allocates a new fake service healthcheck server manager +func NewFakeServiceHealthServer() ServiceHealthServer { + return FakeServiceHealthServer{} } -type healthzHandler struct { - hs *HealthzServer +// SyncServices is part of ServiceHealthServer +func (fake FakeServiceHealthServer) SyncServices(_ map[types.NamespacedName]uint16) error { + return nil } -func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - lastUpdated := time.Time{} - if val := h.hs.lastUpdated.Load(); val != nil { - lastUpdated = val.(time.Time) - } - currentTime := h.hs.clock.Now() - - resp.Header().Set("Content-Type", "application/json") - if !lastUpdated.IsZero() && currentTime.After(lastUpdated.Add(h.hs.healthTimeout)) { - resp.WriteHeader(http.StatusServiceUnavailable) - } else { - resp.WriteHeader(http.StatusOK) - } - fmt.Fprintf(resp, fmt.Sprintf(`{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime)) +// SyncEndpoints is part of ServiceHealthServer +func (fake FakeServiceHealthServer) SyncEndpoints(_ map[types.NamespacedName]int) error { + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD index b45e983765f..188925f9588 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD @@ -21,7 +21,7 @@ go_library( "//pkg/util/iptables:go_default_library", "//pkg/util/sysctl:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - 
"//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -38,6 +38,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/proxy:go_default_library", + "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/util:go_default_library", "//pkg/proxy/util/testing:go_default_library", "//pkg/util/async:go_default_library", @@ -45,7 +46,7 @@ go_test( "//pkg/util/iptables:go_default_library", "//pkg/util/iptables/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index c537d5bfae7..69d5d4adfea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -26,6 +26,7 @@ import ( "encoding/base32" "fmt" "net" + "reflect" "strconv" "strings" "sync" @@ -33,7 +34,7 @@ import ( "time" "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -181,6 +182,7 @@ type Proxier struct { serviceMap proxy.ServiceMap endpointsMap proxy.EndpointsMap portsMap map[utilproxy.LocalPort]utilproxy.Closeable + nodeLabels map[string]string // endpointsSynced, endpointSlicesSynced, and servicesSynced are set to true // when corresponding objects are synced after startup. This is used to avoid // updating iptables with some partial data after kube-proxy restart. @@ -189,6 +191,7 @@ type Proxier struct { servicesSynced bool initialized int32 syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules + syncPeriod time.Duration // These are effectively const and do not need the mutex to be held. 
iptables utiliptables.Interface @@ -200,8 +203,9 @@ type Proxier struct { nodeIP net.IP portMapper utilproxy.PortOpener recorder record.EventRecorder - healthChecker healthcheck.Server - healthzServer healthcheck.HealthzUpdater + + serviceHealthServer healthcheck.ServiceHealthServer + healthzServer healthcheck.ProxierHealthUpdater // Since converting probabilities (floats) to strings is expensive // and we are using only probabilities in the format of 1/n, we are @@ -257,7 +261,7 @@ func NewProxier(ipt utiliptables.Interface, hostname string, nodeIP net.IP, recorder record.EventRecorder, - healthzServer healthcheck.HealthzUpdater, + healthzServer healthcheck.ProxierHealthUpdater, nodePortAddresses []string, ) (*Proxier, error) { // Set the route_localnet sysctl we need for @@ -278,11 +282,6 @@ func NewProxier(ipt utiliptables.Interface, masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) - if nodeIP == nil { - klog.Warning("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") - nodeIP = net.ParseIP("127.0.0.1") - } - if len(clusterCIDR) == 0 { klog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic") } else if utilnet.IsIPv6CIDRString(clusterCIDR) != ipt.IsIpv6() { @@ -291,7 +290,7 @@ func NewProxier(ipt utiliptables.Interface, endpointSlicesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) - healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps + serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder) isIPv6 := ipt.IsIpv6() proxier := &Proxier{ @@ -300,6 +299,7 @@ func NewProxier(ipt utiliptables.Interface, serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder), endpointsMap: make(proxy.EndpointsMap), endpointsChanges: proxy.NewEndpointChangeTracker(hostname, newEndpointInfo, &isIPv6, recorder, endpointSlicesEnabled), + syncPeriod: syncPeriod, iptables: ipt, masqueradeAll: masqueradeAll, masqueradeMark: masqueradeMark, @@ -309,7 +309,7 @@ func NewProxier(ipt utiliptables.Interface, nodeIP: nodeIP, portMapper: &listenPortOpener{}, recorder: recorder, - healthChecker: healthChecker, + serviceHealthServer: serviceHealthServer, healthzServer: healthzServer, precomputedProbabilities: make([]string, 0, 1001), iptablesData: bytes.NewBuffer(nil), @@ -323,7 +323,13 @@ func NewProxier(ipt utiliptables.Interface, } burstSyncs := 2 klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) - proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) + // We pass syncPeriod to ipt.Monitor, which will call us only if it needs to. + // We need to pass *some* maxInterval to NewBoundedFrequencyRunner anyway though. + // time.Hour is arbitrary. 
+ proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, time.Hour, burstSyncs) + go ipt.Monitor(utiliptables.Chain("KUBE-PROXY-CANARY"), + []utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter}, + proxier.syncProxyRules, syncPeriod, wait.NeverStop) return proxier, nil } @@ -432,7 +438,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { } func computeProbability(n int) string { - return fmt.Sprintf("%0.5f", 1.0/float64(n)) + return fmt.Sprintf("%0.10f", 1.0/float64(n)) } // This assumes proxier.mu is held @@ -455,6 +461,9 @@ func (proxier *Proxier) probability(n int) string { // Sync is called to synchronize the proxier state to iptables as soon as possible. func (proxier *Proxier) Sync() { + if proxier.healthzServer != nil { + proxier.healthzServer.QueuedUpdate() + } proxier.syncRunner.Run() } @@ -462,7 +471,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } proxier.syncRunner.Loop(wait.NeverStop) } @@ -489,7 +498,7 @@ func (proxier *Proxier) OnServiceAdd(service *v1.Service) { // service object is observed. func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } @@ -505,7 +514,11 @@ func (proxier *Proxier) OnServiceDelete(service *v1.Service) { func (proxier *Proxier) OnServiceSynced() { proxier.mu.Lock() proxier.servicesSynced = true - proxier.setInitialized(proxier.endpointsSynced || proxier.endpointSlicesSynced) + if utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) { + proxier.setInitialized(proxier.endpointSlicesSynced) + } else { + proxier.setInitialized(proxier.endpointsSynced) + } proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. @@ -537,7 +550,7 @@ func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { func (proxier *Proxier) OnEndpointsSynced() { proxier.mu.Lock() proxier.endpointsSynced = true - proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced) + proxier.setInitialized(proxier.servicesSynced) proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. @@ -573,13 +586,65 @@ func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointS func (proxier *Proxier) OnEndpointSlicesSynced() { proxier.mu.Lock() proxier.endpointSlicesSynced = true - proxier.setInitialized(proxier.servicesSynced && proxier.endpointSlicesSynced) + proxier.setInitialized(proxier.servicesSynced) proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. proxier.syncProxyRules() } +// OnNodeAdd is called whenever creation of new node object +// is observed. +func (proxier *Proxier) OnNodeAdd(node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + oldLabels := proxier.nodeLabels + newLabels := node.Labels + proxier.mu.Lock() + proxier.nodeLabels = newLabels + proxier.mu.Unlock() + if !reflect.DeepEqual(oldLabels, newLabels) { + proxier.syncProxyRules() + } +} + +// OnNodeUpdate is called whenever modification of an existing +// node object is observed. 
+func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + oldLabels := proxier.nodeLabels + newLabels := node.Labels + proxier.mu.Lock() + proxier.nodeLabels = newLabels + proxier.mu.Unlock() + if !reflect.DeepEqual(oldLabels, newLabels) { + proxier.syncProxyRules() + } +} + +// OnNodeDelete is called whever deletion of an existing node +// object is observed. +func (proxier *Proxier) OnNodeDelete(node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + proxier.mu.Lock() + proxier.nodeLabels = nil + proxier.mu.Unlock() + proxier.syncProxyRules() +} + +// OnNodeSynced is called once all the initial event handlers were +// called and the state is fully propagated to local cache. +func (proxier *Proxier) OnNodeSynced() { +} + // portProtoHash takes the ServicePortName and protocol for a service // returns the associated 16 character hash. This is computed by hashing (sha256) // then encoding to base32 and truncating to 16 chars. We do this because IPTables @@ -675,7 +740,7 @@ func (proxier *Proxier) syncProxyRules() { defer proxier.mu.Unlock() // don't sync rules till we've received services and endpoints - if !proxier.endpointsSynced || !proxier.servicesSynced { + if !proxier.isInitialized() { klog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") return } @@ -708,6 +773,14 @@ func (proxier *Proxier) syncProxyRules() { klog.V(3).Info("Syncing iptables rules") + success := false + defer func() { + if !success { + klog.Infof("Sync failed; retrying in %s", proxier.syncPeriod) + proxier.syncRunner.RetryAfter(proxier.syncPeriod) + } + }() + // Create and link the kube chains. for _, jump := range iptablesJumpChains { if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { @@ -769,7 +842,7 @@ func (proxier *Proxier) syncProxyRules() { writeLine(proxier.filterChains, utiliptables.MakeChainLine(chainName)) } } - for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} { + for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain, KubeMarkDropChain} { if chain, ok := existingNATChains[chainName]; ok { writeBytesLine(proxier.natChains, chain) } else { @@ -839,7 +912,20 @@ func (proxier *Proxier) syncProxyRules() { isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP()) protocol := strings.ToLower(string(svcInfo.Protocol())) svcNameString := svcInfo.serviceNameString - hasEndpoints := len(proxier.endpointsMap[svcName]) > 0 + + allEndpoints := proxier.endpointsMap[svcName] + + hasEndpoints := len(allEndpoints) > 0 + + // Service Topology will not be enabled in the following cases: + // 1. externalTrafficPolicy=Local (mutually exclusive with service topology). + // 2. ServiceTopology is not enabled. + // 3. EndpointSlice is not enabled (service topology depends on endpoint slice + // to get topology information). 
+ if !svcInfo.OnlyNodeLocalEndpoints() && utilfeature.DefaultFeatureGate.Enabled(features.ServiceTopology) && utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) { + allEndpoints = proxy.FilterTopologyEndpoint(proxier.nodeLabels, svcInfo.TopologyKeys(), allEndpoints) + hasEndpoints = len(allEndpoints) > 0 + } svcChain := svcInfo.servicePortChainName if hasEndpoints { @@ -1149,12 +1235,13 @@ func (proxier *Proxier) syncProxyRules() { endpoints = endpoints[:0] endpointChains = endpointChains[:0] var endpointChain utiliptables.Chain - for _, ep := range proxier.endpointsMap[svcName] { + for _, ep := range allEndpoints { epInfo, ok := ep.(*endpointsInfo) if !ok { klog.Errorf("Failed to cast endpointsInfo %q", ep.String()) continue } + endpoints = append(endpoints, epInfo) endpointChain = epInfo.endpointChain(svcNameString, protocol) endpointChains = append(endpointChains, endpointChain) @@ -1201,6 +1288,7 @@ func (proxier *Proxier) syncProxyRules() { // Error parsing this endpoint has been logged. Skip to next endpoint. continue } + // Balancing rules in the per-service chain. args = append(args[:0], "-A", string(svcChain)) proxier.appendServiceCommentLocked(args, svcNameString) @@ -1423,6 +1511,8 @@ func (proxier *Proxier) syncProxyRules() { utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return } + success = true + for name, lastChangeTriggerTimes := range endpointUpdateResult.LastChangeTriggerTimes { for _, lastChangeTriggerTime := range lastChangeTriggerTimes { latency := metrics.SinceInSeconds(lastChangeTriggerTime) @@ -1439,19 +1529,18 @@ func (proxier *Proxier) syncProxyRules() { } proxier.portsMap = replacementPortsMap - // Update healthz timestamp. if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() - // Update healthchecks. The endpoints list might include services that are - // not "OnlyLocal", but the services list will not, and the healthChecker + // Update service healthchecks. The endpoints list might include services that are + // not "OnlyLocal", but the services list will not, and the serviceHealthServer // will just drop those endpoints. 
- if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { + if err := proxier.serviceHealthServer.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { klog.Errorf("Error syncing healthcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { + if err := proxier.serviceHealthServer.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { klog.Errorf("Error syncing healthcheck endpoints: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD index 0da921e54c6..9a45bf55c4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD @@ -16,9 +16,11 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/proxy:go_default_library", + "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/ipvs/testing:go_default_library", "//pkg/proxy/util:go_default_library", "//pkg/proxy/util/testing:go_default_library", + "//pkg/util/async:go_default_library", "//pkg/util/ipset:go_default_library", "//pkg/util/ipset/testing:go_default_library", "//pkg/util/iptables:go_default_library", @@ -26,7 +28,7 @@ go_test( "//pkg/util/ipvs:go_default_library", "//pkg/util/ipvs/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -54,6 +56,7 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", + "//pkg/proxy/config:go_default_library", "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/util:go_default_library", @@ -64,7 +67,7 @@ go_library( "//pkg/util/ipvs:go_default_library", "//pkg/util/sysctl:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", @@ -75,6 +78,10 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/net:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/vishvananda/netlink:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/vishvananda/netlink:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md index 7395a6c990f..b90147cadce 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md @@ -302,7 +302,7 @@ modprobe -- ip_vs_sh modprobe -- nf_conntrack_ipv4 # to check loaded modules, use -lsmod | 
grep -e ipvs -e nf_conntrack_ipv4 +lsmod | grep -e ip_vs -e nf_conntrack_ipv4 # or cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4 ``` diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go index bed64ff75c0..419b1a03598 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go @@ -22,8 +22,9 @@ import ( utilipset "k8s.io/kubernetes/pkg/util/ipset" "fmt" - "k8s.io/klog" "strings" + + "k8s.io/klog" ) const ( @@ -66,16 +67,6 @@ const ( kubeNodePortLocalSetUDPComment = "Kubernetes nodeport UDP port with externalTrafficPolicy=local" kubeNodePortLocalSetUDP = "KUBE-NODE-PORT-LOCAL-UDP" - // This ipset is no longer active but still used in previous versions. - // DO NOT create an ipset using this name - legacyKubeNodePortSetSCTPComment = "Kubernetes nodeport SCTP port for masquerade purpose" - legacyKubeNodePortSetSCTP = "KUBE-NODE-PORT-SCTP" - - // This ipset is no longer active but still used in previous versions. - // DO NOT create an ipset using this name - legacyKubeNodePortLocalSetSCTPComment = "Kubernetes nodeport SCTP port with externalTrafficPolicy=local" - legacyKubeNodePortLocalSetSCTP = "KUBE-NODE-PORT-LOCAL-SCTP" - kubeNodePortSetSCTPComment = "Kubernetes nodeport SCTP port for masquerade purpose with type 'hash ip:port'" kubeNodePortSetSCTP = "KUBE-NODE-PORT-SCTP-HASH" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/meta_proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/meta_proxier.go index 45083c61909..062ac3feee5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/meta_proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/meta_proxier.go @@ -22,14 +22,18 @@ import ( "k8s.io/api/core/v1" "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" + "k8s.io/kubernetes/pkg/proxy/config" + utilnet "k8s.io/utils/net" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" ) type metaProxier struct { ipv4Proxier proxy.Provider ipv6Proxier proxy.Provider + // TODO(imroc): implement node handler for meta proxier. + config.NoopNodeHandler } // NewMetaProxier returns a dual-stack "meta-proxier". Proxier API @@ -153,25 +157,47 @@ func (proxier *metaProxier) OnEndpointsSynced() { // OnEndpointSliceAdd is called whenever creation of a new endpoint slice object // is observed. func (proxier *metaProxier) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) { - // noop + switch endpointSlice.AddressType { + case discovery.AddressTypeIPv4: + proxier.ipv4Proxier.OnEndpointSliceAdd(endpointSlice) + case discovery.AddressTypeIPv6: + proxier.ipv6Proxier.OnEndpointSliceAdd(endpointSlice) + default: + klog.V(4).Infof("EndpointSlice address type not supported by kube-proxy: %s", endpointSlice.AddressType) + } } // OnEndpointSliceUpdate is called whenever modification of an existing endpoint // slice object is observed. 
-func (proxier *metaProxier) OnEndpointSliceUpdate(_, endpointSlice *discovery.EndpointSlice) { - //noop +func (proxier *metaProxier) OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice *discovery.EndpointSlice) { + switch newEndpointSlice.AddressType { + case discovery.AddressTypeIPv4: + proxier.ipv4Proxier.OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice) + case discovery.AddressTypeIPv6: + proxier.ipv6Proxier.OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice) + default: + klog.V(4).Infof("EndpointSlice address type not supported by kube-proxy: %s", newEndpointSlice.AddressType) + } } // OnEndpointSliceDelete is called whenever deletion of an existing endpoint slice // object is observed. func (proxier *metaProxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) { - //noop + switch endpointSlice.AddressType { + case discovery.AddressTypeIPv4: + proxier.ipv4Proxier.OnEndpointSliceDelete(endpointSlice) + case discovery.AddressTypeIPv6: + proxier.ipv6Proxier.OnEndpointSliceDelete(endpointSlice) + default: + klog.V(4).Infof("EndpointSlice address type not supported by kube-proxy: %s", endpointSlice.AddressType) + } } // OnEndpointSlicesSynced is called once all the initial event handlers were // called and the state is fully propagated to local cache. func (proxier *metaProxier) OnEndpointSlicesSynced() { - //noop + proxier.ipv4Proxier.OnEndpointSlicesSynced() + proxier.ipv6Proxier.OnEndpointSlicesSynced() } // endpointsIPFamily that returns IPFamily of endpoints or error if diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index 1f6ed8e81c5..2533817dda9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "net" "os" + "reflect" "regexp" "strconv" "strings" @@ -30,15 +31,18 @@ import ( "sync/atomic" "time" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + utilnet "k8s.io/utils/net" + v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1alpha1" + discovery "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -50,8 +54,6 @@ import ( utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" - utilexec "k8s.io/utils/exec" - utilnet "k8s.io/utils/net" ) const ( @@ -200,6 +202,7 @@ type Proxier struct { serviceMap proxy.ServiceMap endpointsMap proxy.EndpointsMap portsMap map[utilproxy.LocalPort]utilproxy.Closeable + nodeLabels map[string]string // endpointsSynced, endpointSlicesSynced, and servicesSynced are set to true when // corresponding objects are synced after startup. This is used to avoid updating // ipvs rules with some partial data after kube-proxy restart. 
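The meta_proxier.go hunks above replace the no-op EndpointSlice handlers with per-address-family dispatch. A minimal standalone sketch of that pattern, with plain handler funcs standing in for the ipv4Proxier/ipv6Proxier providers (illustrative only, not part of the patch):

package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1beta1"
)

// dispatchByAddressType routes an EndpointSlice to exactly one per-family
// handler based on its AddressType, logging unsupported types, which is the
// shape of the new OnEndpointSlice* methods above.
func dispatchByAddressType(slice *discovery.EndpointSlice, ipv4, ipv6 func(*discovery.EndpointSlice)) {
	switch slice.AddressType {
	case discovery.AddressTypeIPv4:
		ipv4(slice)
	case discovery.AddressTypeIPv6:
		ipv6(slice)
	default:
		fmt.Printf("EndpointSlice address type not supported: %s\n", slice.AddressType)
	}
}

func main() {
	slice := &discovery.EndpointSlice{AddressType: discovery.AddressTypeIPv4}
	dispatchByAddressType(slice,
		func(s *discovery.EndpointSlice) { fmt.Println("handled by IPv4 proxier") },
		func(s *discovery.EndpointSlice) { fmt.Println("handled by IPv6 proxier") },
	)
}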
@@ -227,9 +230,11 @@ type Proxier struct { nodeIP net.IP portMapper utilproxy.PortOpener recorder record.EventRecorder - healthChecker healthcheck.Server - healthzServer healthcheck.HealthzUpdater - ipvsScheduler string + + serviceHealthServer healthcheck.ServiceHealthServer + healthzServer healthcheck.ProxierHealthUpdater + + ipvsScheduler string // Added as a member to the struct to allow injection for testing. ipGetter IPGetter // The following buffers are used to reuse memory and avoid allocations @@ -328,7 +333,7 @@ func NewProxier(ipt utiliptables.Interface, hostname string, nodeIP net.IP, recorder record.EventRecorder, - healthzServer healthcheck.HealthzUpdater, + healthzServer healthcheck.ProxierHealthUpdater, scheduler string, nodePortAddresses []string, ) (*Proxier, error) { @@ -401,11 +406,6 @@ func NewProxier(ipt utiliptables.Interface, masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) - if nodeIP == nil { - klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") - nodeIP = net.ParseIP("127.0.0.1") - } - isIPv6 := utilnet.IsIPv6(nodeIP) klog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) @@ -421,7 +421,7 @@ func NewProxier(ipt utiliptables.Interface, scheduler = DefaultScheduler } - healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps + serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder) endpointSlicesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) @@ -443,7 +443,7 @@ func NewProxier(ipt utiliptables.Interface, nodeIP: nodeIP, portMapper: &listenPortOpener{}, recorder: recorder, - healthChecker: healthChecker, + serviceHealthServer: serviceHealthServer, healthzServer: healthzServer, ipvs: ipvs, ipvsScheduler: scheduler, @@ -489,7 +489,7 @@ func NewDualStackProxier( hostname string, nodeIP [2]net.IP, recorder record.EventRecorder, - healthzServer healthcheck.HealthzUpdater, + healthzServer healthcheck.ProxierHealthUpdater, scheduler string, nodePortAddresses []string, ) (proxy.Provider, error) { @@ -578,30 +578,12 @@ func (handle *LinuxKernelHandler) GetModules() ([]string, error) { } ipvsModules := utilipvs.GetRequiredIPVSModules(kernelVersion) - builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersionStr) - b, err := ioutil.ReadFile(builtinModsFilePath) - if err != nil { - klog.Warningf("Failed to read file %s with error %v. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", builtinModsFilePath, err) - } var bmods []string - for _, module := range ipvsModules { - if match, _ := regexp.Match(module+".ko", b); match { - bmods = append(bmods, module) - } - } - - // Try to load IPVS required kernel modules using modprobe first - for _, kmod := range ipvsModules { - err := handle.executor.Command("modprobe", "--", kmod).Run() - if err != nil { - klog.Warningf("Failed to load kernel module %v with modprobe. "+ - "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", kmod) - } - } - // Find out loaded kernel modules + // Find out loaded kernel modules. If this is a full static kernel it will thrown an error modulesFile, err := os.Open("/proc/modules") if err != nil { + klog.Warningf("Failed to read file /proc/modules with error %v. 
Kube-proxy requires loadable modules support enabled in the kernel", err) return nil, err } @@ -610,6 +592,25 @@ func (handle *LinuxKernelHandler) GetModules() ([]string, error) { return nil, fmt.Errorf("failed to find loaded kernel modules: %v", err) } + builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersionStr) + b, err := ioutil.ReadFile(builtinModsFilePath) + if err != nil { + klog.Warningf("Failed to read file %s with error %v. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", builtinModsFilePath, err) + } + + for _, module := range ipvsModules { + if match, _ := regexp.Match(module+".ko", b); match { + bmods = append(bmods, module) + } else { + // Try to load the required IPVS kernel modules if not built in + err := handle.executor.Command("modprobe", "--", module).Run() + if err != nil { + klog.Warningf("Failed to load kernel module %v with modprobe. "+ + "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", module) + } + } + } + return append(mods, bmods...), nil } @@ -775,6 +776,9 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset // Sync is called to synchronize the proxier state to iptables and ipvs as soon as possible. func (proxier *Proxier) Sync() { + if proxier.healthzServer != nil { + proxier.healthzServer.QueuedUpdate() + } proxier.syncRunner.Run() } @@ -782,7 +786,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } proxier.syncRunner.Loop(wait.NeverStop) } @@ -807,7 +811,7 @@ func (proxier *Proxier) OnServiceAdd(service *v1.Service) { // OnServiceUpdate is called whenever modification of an existing service object is observed. func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } @@ -820,7 +824,11 @@ func (proxier *Proxier) OnServiceDelete(service *v1.Service) { func (proxier *Proxier) OnServiceSynced() { proxier.mu.Lock() proxier.servicesSynced = true - proxier.setInitialized(proxier.endpointsSynced || proxier.endpointSlicesSynced) + if utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) { + proxier.setInitialized(proxier.endpointSlicesSynced) + } else { + proxier.setInitialized(proxier.endpointsSynced) + } proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. @@ -835,7 +843,7 @@ func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) { // OnEndpointsUpdate is called whenever modification of an existing endpoints object is observed. func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { if proxier.endpointsChanges.Update(oldEndpoints, endpoints) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } @@ -848,7 +856,7 @@ func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { func (proxier *Proxier) OnEndpointsSynced() { proxier.mu.Lock() proxier.endpointsSynced = true - proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced) + proxier.setInitialized(proxier.servicesSynced) proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. 
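The reworked LinuxKernelHandler.GetModules above now reads /proc/modules first (failing early on kernels without loadable module support), treats anything listed in modules.builtin as already available, and only falls back to modprobe for the remaining IPVS modules. A rough standalone sketch of that detection order; the module list and kernel version are illustrative and error handling is simplified:

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"
	"regexp"
)

// loadIPVSModules mirrors, in simplified form, the order used by the patched
// GetModules: fail if the kernel has no loadable module support, skip modules
// compiled into the kernel, and modprobe the rest on a best-effort basis.
func loadIPVSModules(kernelVersion string) error {
	required := []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack"}

	// /proc/modules is absent on fully static kernels; the real code returns
	// the error to the caller in that case.
	if _, err := ioutil.ReadFile("/proc/modules"); err != nil {
		return fmt.Errorf("no loadable module support: %v", err)
	}

	// Modules compiled into the kernel need no modprobe.
	builtin, err := ioutil.ReadFile("/lib/modules/" + kernelVersion + "/modules.builtin")
	if err != nil {
		fmt.Printf("could not read modules.builtin: %v\n", err)
	}

	for _, mod := range required {
		if ok, _ := regexp.Match(mod+".ko", builtin); ok {
			continue // built in, nothing to load
		}
		// Best-effort load, mirroring the patch's fallback; failures are only logged.
		if err := exec.Command("modprobe", "--", mod).Run(); err != nil {
			fmt.Printf("failed to load %s: %v\n", mod, err)
		}
	}
	return nil
}

func main() {
	if err := loadIPVSModules("5.4.0"); err != nil {
		fmt.Println(err)
	}
}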
@@ -884,16 +892,95 @@ func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointS func (proxier *Proxier) OnEndpointSlicesSynced() { proxier.mu.Lock() proxier.endpointSlicesSynced = true - proxier.setInitialized(proxier.servicesSynced && proxier.endpointSlicesSynced) + proxier.setInitialized(proxier.servicesSynced) proxier.mu.Unlock() // Sync unconditionally - this is called once per lifetime. proxier.syncProxyRules() } +// OnNodeAdd is called whenever creation of new node object +// is observed. +func (proxier *Proxier) OnNodeAdd(node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + oldLabels := proxier.nodeLabels + newLabels := node.Labels + proxier.mu.Lock() + proxier.nodeLabels = newLabels + proxier.mu.Unlock() + if !reflect.DeepEqual(oldLabels, newLabels) { + proxier.syncProxyRules() + } +} + +// OnNodeUpdate is called whenever modification of an existing +// node object is observed. +func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + oldLabels := proxier.nodeLabels + newLabels := node.Labels + proxier.mu.Lock() + proxier.nodeLabels = newLabels + proxier.mu.Unlock() + if !reflect.DeepEqual(oldLabels, newLabels) { + proxier.syncProxyRules() + } +} + +// OnNodeDelete is called whever deletion of an existing node +// object is observed. +func (proxier *Proxier) OnNodeDelete(node *v1.Node) { + if node.Name != proxier.hostname { + klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname) + return + } + proxier.mu.Lock() + proxier.nodeLabels = nil + proxier.mu.Unlock() + proxier.syncProxyRules() +} + +// OnNodeSynced is called once all the initial event handlers were +// called and the state is fully propagated to local cache. +func (proxier *Proxier) OnNodeSynced() { +} + // EntryInvalidErr indicates if an ipset entry is invalid or not const EntryInvalidErr = "error adding entry %s to ipset %s" +func getLocalAddrs() ([]net.IP, error) { + var localAddrs []net.IP + + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + localAddrs = append(localAddrs, ip) + } + return localAddrs, nil +} + +func ipExists(ip net.IP, addrs []net.IP) bool { + for _, addr := range addrs { + if ip.Equal(addr) { + return true + } + } + return false +} + // This is where all of the ipvs calls happen. 
// assumes proxier.mu is held func (proxier *Proxier) syncProxyRules() { @@ -901,7 +988,7 @@ func (proxier *Proxier) syncProxyRules() { defer proxier.mu.Unlock() // don't sync rules till we've received services and endpoints - if !proxier.endpointsSynced || !proxier.servicesSynced { + if !proxier.isInitialized() { klog.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master") return } @@ -914,6 +1001,11 @@ func (proxier *Proxier) syncProxyRules() { klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() + localAddrs, err := getLocalAddrs() + if err != nil { + klog.Errorf("Failed to get local addresses during proxy sync: %v", err) + } + // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. @@ -950,7 +1042,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.createAndLinkeKubeChain() // make sure dummy interface exists in the system where ipvs Proxier will bind service address on it - _, err := proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) + _, err = proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) if err != nil { klog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) return @@ -1095,11 +1187,10 @@ func (proxier *Proxier) syncProxyRules() { // Capture externalIPs. for _, externalIP := range svcInfo.ExternalIPStrings() { - if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - klog.Errorf("can't determine if IP is local, assuming not: %v", err) - // We do not start listening on SCTP ports, according to our agreement in the - // SCTP support KEP - } else if local && (svcInfo.Protocol() != v1.ProtocolSCTP) { + if len(localAddrs) == 0 { + klog.Errorf("couldn't find any local IPs, assuming %s is not local", externalIP) + } else if (svcInfo.Protocol() != v1.ProtocolSCTP) && ipExists(net.ParseIP(externalIP), localAddrs) { + // We do not start listening on SCTP ports, according to our agreement in the SCTP support KEP lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, IP: externalIP, @@ -1485,19 +1576,18 @@ func (proxier *Proxier) syncProxyRules() { } proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices, legacyBindAddrs) - // Update healthz timestamp if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() - // Update healthchecks. The endpoints list might include services that are - // not "OnlyLocal", but the services list will not, and the healthChecker + // Update service healthchecks. The endpoints list might include services that are + // not "OnlyLocal", but the services list will not, and the serviceHealthServer // will just drop those endpoints. 
- if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { + if err := proxier.serviceHealthServer.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { klog.Errorf("Error syncing healthcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { + if err := proxier.serviceHealthServer.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { klog.Errorf("Error syncing healthcheck endpoints: %v", err) } @@ -1831,7 +1921,18 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode curEndpoints.Insert(des.String()) } - for _, epInfo := range proxier.endpointsMap[svcPortName] { + endpoints := proxier.endpointsMap[svcPortName] + + // Service Topology will not be enabled in the following cases: + // 1. externalTrafficPolicy=Local (mutually exclusive with service topology). + // 2. ServiceTopology is not enabled. + // 3. EndpointSlice is not enabled (service topology depends on endpoint slice + // to get topology information). + if !onlyNodeLocalEndpoints && utilfeature.DefaultFeatureGate.Enabled(features.ServiceTopology) && utilfeature.DefaultFeatureGate.Enabled(features.EndpointSlice) { + endpoints = proxy.FilterTopologyEndpoint(proxier.nodeLabels, proxier.serviceMap[svcPortName].TopologyKeys(), endpoints) + } + + for _, epInfo := range endpoints { if onlyNodeLocalEndpoints && !epInfo.GetIsLocal() { continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD index 32c501a411a..712222f5d19 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go index 4a0cc02d905..8282091dd7c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -35,7 +33,7 @@ var ( Subsystem: kubeProxySubsystem, Name: "sync_proxy_rules_duration_seconds", Help: "SyncProxyRules latency in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) @@ -43,11 +41,12 @@ var ( // DeprecatedSyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy rules. 
DeprecatedSyncProxyRulesLatency = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: kubeProxySubsystem, - Name: "sync_proxy_rules_latency_microseconds", - Help: "(Deprecated) SyncProxyRules latency in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_latency_microseconds", + Help: "SyncProxyRules latency in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) @@ -75,10 +74,10 @@ var ( Name: "network_programming_duration_seconds", Help: "In Cluster Network Programming Latency in seconds", Buckets: merge( - prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s - prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s - prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s - prometheus.LinearBuckets(120, 30, 7), // 2min, 2.5min, 3min, ..., 5min + metrics.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s + metrics.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s + metrics.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s + metrics.LinearBuckets(120, 30, 7), // 2min, 2.5min, 3min, ..., 5min ), StabilityLevel: metrics.ALPHA, }, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go index 09db239a3a9..5a18d2c7941 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go @@ -51,6 +51,7 @@ type BaseServiceInfo struct { loadBalancerSourceRanges []string healthCheckNodePort int onlyNodeLocalEndpoints bool + topologyKeys []string } var _ ServicePort = &BaseServiceInfo{} @@ -119,6 +120,11 @@ func (info *BaseServiceInfo) OnlyNodeLocalEndpoints() bool { return info.onlyNodeLocalEndpoints } +// TopologyKeys is part of ServicePort interface. +func (info *BaseServiceInfo) TopologyKeys() []string { + return info.topologyKeys +} + func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, service *v1.Service) *BaseServiceInfo { onlyNodeLocalEndpoints := false if apiservice.RequestsOnlyLocalTraffic(service) { @@ -139,6 +145,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic sessionAffinityType: service.Spec.SessionAffinity, stickyMaxAgeSeconds: stickyMaxAgeSeconds, onlyNodeLocalEndpoints: onlyNodeLocalEndpoints, + topologyKeys: service.Spec.TopologyKeys, } if sct.isIPv6Mode == nil { @@ -299,7 +306,7 @@ func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) Servic serviceMap := make(ServiceMap) for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] - svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name} + svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name, Protocol: servicePort.Protocol} baseSvcInfo := sct.newBaseServiceInfo(servicePort, service) if sct.makeServiceInfo != nil { serviceMap[svcPortName] = sct.makeServiceInfo(servicePort, service, baseSvcInfo) @@ -325,7 +332,6 @@ func (sm *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP set // clear changes after applying them to ServiceMap. changes.items = make(map[types.NamespacedName]*serviceChange) metrics.ServiceChangesPending.Set(0) - return } // merge adds other ServiceMap's elements to current ServiceMap. 
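The serviceToServiceMap hunk above starts keying the service map by protocol as well as namespace, name, and port name. A small sketch of why that matters, using a local copy of the ServicePortName struct rather than importing kube-proxy internals; the port names here are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// ServicePortName is a local copy of the struct updated in pkg/proxy/types.go.
// Carrying the protocol keeps ports that share a name but differ in protocol,
// such as DNS over TCP and UDP, from colliding as service map keys.
type ServicePortName struct {
	types.NamespacedName
	Port     string
	Protocol v1.Protocol
}

func main() {
	nn := types.NamespacedName{Namespace: "kube-system", Name: "kube-dns"}
	tcp := ServicePortName{NamespacedName: nn, Port: "dns", Protocol: v1.ProtocolTCP}
	udp := ServicePortName{NamespacedName: nn, Port: "dns", Protocol: v1.ProtocolUDP}
	fmt.Println(tcp == udp) // false: the two ports now get distinct entries
}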
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go new file mode 100644 index 00000000000..fda3348e410 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + v1 "k8s.io/api/core/v1" +) + +// FilterTopologyEndpoint returns the appropriate endpoints based on the cluster +// topology. +// This uses the current node's labels, which contain topology information, and +// the required topologyKeys to find appropriate endpoints. If both the endpoint's +// topology and the current node have matching values for topologyKeys[0], the +// endpoint will be chosen. If no endpoints are chosen, toplogyKeys[1] will be +// considered, and so on. If either the node or the endpoint do not have values +// for a key, it is considered to not match. +// +// If topologyKeys is specified, but no endpoints are chosen for any key, the +// the service has no viable endpoints for clients on this node, and connections +// should fail. +// +// The special key "*" may be used as the last entry in topologyKeys to indicate +// "any endpoint" is acceptable. +// +// If topologyKeys is not specified or empty, no topology constraints will be +// applied and this will return all endpoints. +func FilterTopologyEndpoint(nodeLabels map[string]string, topologyKeys []string, endpoints []Endpoint) []Endpoint { + // Do not filter endpoints if service has no topology keys. + if len(topologyKeys) == 0 { + return endpoints + } + + filteredEndpoint := []Endpoint{} + + if len(nodeLabels) == 0 { + if topologyKeys[len(topologyKeys)-1] == v1.TopologyKeyAny { + // edge case: include all endpoints if topology key "Any" specified + // when we cannot determine current node's topology. + return endpoints + } + // edge case: do not include any endpoints if topology key "Any" is + // not specified when we cannot determine current node's topology. 
+ return filteredEndpoint + } + + for _, key := range topologyKeys { + if key == v1.TopologyKeyAny { + return endpoints + } + topologyValue, found := nodeLabels[key] + if !found { + continue + } + + for _, ep := range endpoints { + topology := ep.GetTopology() + if value, found := topology[key]; found && value == topologyValue { + filteredEndpoint = append(filteredEndpoint, ep) + } + } + if len(filteredEndpoint) > 0 { + return filteredEndpoint + } + } + return filteredEndpoint +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go index c8ddf924c9c..fae2c549ce3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go @@ -30,6 +30,7 @@ type Provider interface { config.EndpointsHandler config.EndpointSliceHandler config.ServiceHandler + config.NodeHandler // Sync immediately synchronizes the Provider's current state to proxy rules. Sync() @@ -43,7 +44,8 @@ type Provider interface { // identifier for a load-balanced service. type ServicePortName struct { types.NamespacedName - Port string + Port string + Protocol v1.Protocol } func (spn ServicePortName) String() string { @@ -76,6 +78,8 @@ type ServicePort interface { NodePort() int // GetOnlyNodeLocalEndpoints returns if a service has only node local endpoints OnlyNodeLocalEndpoints() bool + // TopologyKeys returns service TopologyKeys as a string array. + TopologyKeys() []string } // Endpoint in an interface which abstracts information about an endpoint. @@ -86,6 +90,8 @@ type Endpoint interface { String() string // GetIsLocal returns true if the endpoint is running in same host as kube-proxy, otherwise returns false. GetIsLocal() bool + // GetTopology returns the topology information of the endpoint. + GetTopology() map[string]string // IP returns IP part of the endpoint. IP() string // Port returns the Port part of the endpoint. 
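The new FilterTopologyEndpoint above walks the service's topologyKeys in order and keeps only the endpoints whose topology matches the node's label for the first key that yields any match, with "*" meaning any endpoint is acceptable. A simplified standalone sketch of that selection; the endpoint type is a stand-in for proxy.Endpoint and edge cases such as empty node labels are omitted:

package main

import "fmt"

// endpoint stands in for the proxy.Endpoint interface; only the topology
// labels matter for this illustration.
type endpoint struct {
	ip       string
	topology map[string]string
}

// filterByTopology keeps endpoints whose topology value matches the node's
// label for the first topologyKey that produces any match; "*" short-circuits
// to all endpoints, and no match at all yields no endpoints.
func filterByTopology(nodeLabels map[string]string, topologyKeys []string, eps []endpoint) []endpoint {
	if len(topologyKeys) == 0 {
		return eps
	}
	for _, key := range topologyKeys {
		if key == "*" {
			return eps
		}
		nodeValue, ok := nodeLabels[key]
		if !ok {
			continue
		}
		var matched []endpoint
		for _, ep := range eps {
			if ep.topology[key] == nodeValue {
				matched = append(matched, ep)
			}
		}
		if len(matched) > 0 {
			return matched
		}
	}
	return nil // no key matched: no viable endpoints for clients on this node
}

func main() {
	nodeLabels := map[string]string{
		"kubernetes.io/hostname":      "node-a",
		"topology.kubernetes.io/zone": "zone-1",
	}
	eps := []endpoint{
		{ip: "10.0.0.1", topology: map[string]string{"kubernetes.io/hostname": "node-b", "topology.kubernetes.io/zone": "zone-1"}},
		{ip: "10.0.0.2", topology: map[string]string{"kubernetes.io/hostname": "node-c", "topology.kubernetes.io/zone": "zone-2"}},
	}
	// No endpoint on this host, so selection falls through to the zone key.
	fmt.Println(filterByTopology(nodeLabels, []string{"kubernetes.io/hostname", "topology.kubernetes.io/zone", "*"}, eps))
}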
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD index 0d756475daf..9c76a02c01e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD @@ -19,7 +19,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/proxy/userspace", deps = [ - "//pkg/apis/core/v1/helper:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/config:go_default_library", "//pkg/proxy/util:go_default_library", @@ -34,6 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ @@ -49,6 +49,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], @@ -83,7 +86,7 @@ go_test( "//pkg/proxy:go_default_library", "//pkg/util/iptables/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go index 9e07f0f6073..7a34529d6ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go @@ -32,8 +32,8 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + servicehelper "k8s.io/cloud-provider/service/helpers" "k8s.io/klog" - "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/config" utilproxy "k8s.io/kubernetes/pkg/proxy/util" @@ -112,6 +112,8 @@ type asyncRunnerInterface interface { type Proxier struct { // EndpointSlice support has not been added for this proxier yet. config.NoopEndpointSliceHandler + // TODO(imroc): implement node handler for userspace proxier. + config.NoopNodeHandler loadBalancer LoadBalancer mu sync.Mutex // protects serviceMap @@ -172,11 +174,6 @@ var ( ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost") ) -// IsProxyLocked returns true if the proxy could not acquire the lock on iptables. -func IsProxyLocked(err error) bool { - return strings.Contains(err.Error(), "holding the xtables lock") -} - // NewProxier returns a new Proxier given a LoadBalancer and an address on // which to listen. Because of the iptables logic, It is assumed that there // is only a single Proxier active on a machine. 
An error will be returned if @@ -678,7 +675,7 @@ func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bo if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) { return false } - if !helper.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) { + if !servicehelper.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) { return false } if info.sessionAffinityType != service.Spec.SessionAffinity { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go index 33c16f41292..29259a4bc3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/util" @@ -201,22 +202,20 @@ func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.Service // Loop through the valid endpoints and then the endpoints associated with the Load Balancer. // Then remove any session affinity records that are not in both lists. // This assumes the lb.lock is held. -func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) { - allEndpoints := map[string]int{} +func (lb *LoadBalancerRR) removeStaleAffinity(svcPort proxy.ServicePortName, newEndpoints []string) { + newEndpointsSet := sets.NewString() for _, newEndpoint := range newEndpoints { - allEndpoints[newEndpoint] = 1 + newEndpointsSet.Insert(newEndpoint) } + state, exists := lb.services[svcPort] if !exists { return } for _, existingEndpoint := range state.endpoints { - allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1 - } - for mKey, mVal := range allEndpoints { - if mVal == 1 { - klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) - removeSessionAffinityByEndpoint(state, svcPort, mKey) + if !newEndpointsSet.Has(existingEndpoint) { + klog.V(2).Infof("Delete endpoint %s for service %q", existingEndpoint, svcPort) + removeSessionAffinityByEndpoint(state, svcPort, existingEndpoint) } } } @@ -234,11 +233,9 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { if !exists || state == nil || len(newEndpoints) > 0 { klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) - lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsAdd can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created - // if one does not already exist. The affinity will be updated - // later, once NewService is called. + // if one does not already exist. state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0) state.endpoints = util.ShuffleStrings(newEndpoints) @@ -268,7 +265,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) - lb.updateAffinityMap(svcPort, newEndpoints) + lb.removeStaleAffinity(svcPort, newEndpoints) // OnEndpointsUpdate can be called without NewService being called externally. // To be safe we will call it here. 
A new service will only be created // if one does not already exist. The affinity will be updated diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD index 7ea992e6fef..204b7ee23f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD @@ -13,7 +13,6 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ "//pkg/api/v1/service:go_default_library", @@ -63,8 +62,9 @@ go_test( deps = select({ "@io_bazel_rules_go//go/platform:windows": [ "//pkg/proxy:go_default_library", + "//pkg/proxy/healthcheck:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/github.com/Microsoft/hcsshim/hcn:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go index 8f34daeffdd..5e0e095fc90 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -34,18 +32,19 @@ var ( Subsystem: kubeProxySubsystem, Name: "sync_proxy_rules_duration_seconds", Help: "SyncProxyRules latency in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) DeprecatedSyncProxyRulesLatency = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: kubeProxySubsystem, - Name: "sync_proxy_rules_latency_microseconds", - Help: "(Deprecated) SyncProxyRules latency in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_latency_microseconds", + Help: "SyncProxyRules latency in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index 5ef3daf2de7..b5a745acb89 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -444,6 +444,8 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxySe type Proxier struct { // EndpointSlice support has not been added for this proxier yet. proxyconfig.NoopEndpointSliceHandler + // TODO(imroc): implement node handler for winkernel proxier. 
+ proxyconfig.NoopNodeHandler // endpointsChanges and serviceChanges contains all changes to endpoints and // services that happened since policies were synced. For a single object, @@ -471,8 +473,9 @@ type Proxier struct { hostname string nodeIP net.IP recorder record.EventRecorder - healthChecker healthcheck.Server - healthzServer healthcheck.HealthzUpdater + + serviceHealthServer healthcheck.ServiceHealthServer + healthzServer healthcheck.ProxierHealthUpdater // Since converting probabilities (floats) to strings is expensive // and we are using only probabilities in the format of 1/n, we are @@ -527,7 +530,7 @@ func NewProxier( hostname string, nodeIP net.IP, recorder record.EventRecorder, - healthzServer healthcheck.HealthzUpdater, + healthzServer healthcheck.ProxierHealthUpdater, config config.KubeProxyWinkernelConfiguration, ) (*Proxier, error) { masqueradeValue := 1 << uint(masqueradeBit) @@ -542,7 +545,7 @@ func NewProxier( klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } - healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps + serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder) var hns HostNetworkService hns = hnsV1{} supportedFeatures := hcn.GetSupportedFeatures() @@ -622,24 +625,24 @@ func NewProxier( } proxier := &Proxier{ - portsMap: make(map[localPort]closeable), - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(hostname), - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - clusterCIDR: clusterCIDR, - hostname: hostname, - nodeIP: nodeIP, - recorder: recorder, - healthChecker: healthChecker, - healthzServer: healthzServer, - hns: hns, - network: *hnsNetworkInfo, - sourceVip: sourceVip, - hostMac: hostMac, - isDSR: isDSR, + portsMap: make(map[localPort]closeable), + serviceMap: make(proxyServiceMap), + serviceChanges: newServiceChangeMap(), + endpointsMap: make(proxyEndpointsMap), + endpointsChanges: newEndpointsChangeMap(hostname), + masqueradeAll: masqueradeAll, + masqueradeMark: masqueradeMark, + clusterCIDR: clusterCIDR, + hostname: hostname, + nodeIP: nodeIP, + recorder: recorder, + serviceHealthServer: serviceHealthServer, + healthzServer: healthzServer, + hns: hns, + network: *hnsNetworkInfo, + sourceVip: sourceVip, + hostMac: hostMac, + isDSR: isDSR, } burstSyncs := 2 @@ -725,6 +728,9 @@ func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { // Sync is called to synchronize the proxier state to hns as soon as possible. func (proxier *Proxier) Sync() { + if proxier.healthzServer != nil { + proxier.healthzServer.QueuedUpdate() + } proxier.syncRunner.Run() } @@ -732,7 +738,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. 
if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } proxier.syncRunner.Loop(wait.NeverStop) } @@ -752,21 +758,21 @@ func (proxier *Proxier) isInitialized() bool { func (proxier *Proxier) OnServiceAdd(service *v1.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if proxier.serviceChanges.update(&namespacedName, nil, service, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if proxier.serviceChanges.update(&namespacedName, oldService, service, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } func (proxier *Proxier) OnServiceDelete(service *v1.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if proxier.serviceChanges.update(&namespacedName, service, nil, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } @@ -827,21 +833,21 @@ func (proxier *Proxier) updateServiceMap() (result updateServiceMapResult) { func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} if proxier.endpointsChanges.update(&namespacedName, nil, endpoints, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} if proxier.endpointsChanges.update(&namespacedName, endpoints, nil, proxier.hns) && proxier.isInitialized() { - proxier.syncRunner.Run() + proxier.Sync() } } @@ -1276,19 +1282,18 @@ func (proxier *Proxier) syncProxyRules() { Log(svcInfo, "+++Policy Successfully applied for service +++", 2) } - // Update healthz timestamp. if proxier.healthzServer != nil { - proxier.healthzServer.UpdateTimestamp() + proxier.healthzServer.Updated() } SyncProxyRulesLastTimestamp.SetToCurrentTime() - // Update healthchecks. The endpoints list might include services that are - // not "OnlyLocal", but the services list will not, and the healthChecker + // Update service healthchecks. The endpoints list might include services that are + // not "OnlyLocal", but the services list will not, and the serviceHealthServer // will just drop those endpoints. 
- if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { + if err := proxier.serviceHealthServer.SyncServices(serviceUpdateResult.hcServices); err != nil { klog.Errorf("Error syncing healthcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { + if err := proxier.serviceHealthServer.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { klog.Errorf("Error syncing healthcheck endpoints: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD index f360cb67a40..eebb3ab3579 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD @@ -45,7 +45,7 @@ go_test( "//pkg/proxy:go_default_library", "//pkg/util/netsh/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/discovery/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/discovery/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go index e39b24f873b..544a39986b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go @@ -83,6 +83,8 @@ func logTimeout(err error) bool { type Proxier struct { // EndpointSlice support has not been added for this proxier yet. config.NoopEndpointSliceHandler + // TODO(imroc): implement node handler for winuserspace proxier. + config.NoopNodeHandler loadBalancer LoadBalancer mu sync.Mutex // protects serviceMap @@ -189,7 +191,7 @@ func (proxier *Proxier) cleanupStaleStickySessions() { }, Port: name.Port, } - if servicePortNameMap[servicePortName] == false { + if !servicePortNameMap[servicePortName] { // ensure cleanup sticky sessions only gets called once per serviceportname servicePortNameMap[servicePortName] = true proxier.loadBalancer.CleanupStaleStickySessions(servicePortName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go index 1ac1c76635f..84185042cf6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go @@ -27,7 +27,7 @@ import ( "time" "github.com/miekg/dns" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog" @@ -426,7 +426,7 @@ func processDNSQueryPacket( } // Query - Response bit that specifies whether this message is a query (0) or a response (1). - if msg.MsgHdr.Response == true { + if msg.MsgHdr.Response { return length, fmt.Errorf("DNS packet should be a query message") } @@ -473,7 +473,7 @@ func processDNSResponsePacket( } // Query - Response bit that specifies whether this message is a query (0) or a response (1). 
- if msg.MsgHdr.Response == false { + if !msg.MsgHdr.Response { return drop, length, fmt.Errorf("DNS packet should be a response message") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go index cba69f5073a..9b217489db3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go @@ -45,25 +45,6 @@ func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { return true } -// V1Equals returns true if the two lists are equivalent -func V1Equals(a corev1.ResourceList, b corev1.ResourceList) bool { - if len(a) != len(b) { - return false - } - - for key, value1 := range a { - value2, found := b[key] - if !found { - return false - } - if value1.Cmp(value2) != 0 { - return false - } - } - - return true -} - // LessThanOrEqual returns true if a < b for each key in b // If false, it returns the keys in a that exceeded b func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD index ce46d72cbb8..44473230159 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD @@ -3,37 +3,52 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "algorithm_factory.go", "eventhandlers.go", + "factory.go", "scheduler.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler", visibility = ["//visibility:public"], deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/features:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/api/latest:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/apis/config/scheme:go_default_library", + "//pkg/scheduler/apis/config/validation:go_default_library", "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/framework/plugins:go_default_library", + "//pkg/scheduler/framework/plugins/nodelabel:go_default_library", + "//pkg/scheduler/framework/plugins/requestedtocapacityratio:go_default_library", + "//pkg/scheduler/framework/plugins/serviceaffinity:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/cache/debugger:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/events:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -43,24 +58,32 @@ go_library( go_test( name = "go_default_test", srcs = [ + "algorithm_factory_test.go", "eventhandlers_test.go", + "factory_test.go", "scheduler_test.go", ], embed = [":go_default_library"], deps = [ + "//pkg/api/testing:go_default_library", "//pkg/controller/volume/scheduling:go_default_library", + "//pkg/features:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/apis/config/scheme:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/framework/plugins:go_default_library", + "//pkg/scheduler/framework/plugins/nodelabel:go_default_library", + "//pkg/scheduler/framework/plugins/serviceaffinity:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache/fake:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/events/v1beta1:go_default_library", @@ -69,14 +92,21 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/events:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) @@ -95,11 +125,13 @@ filegroup( "//pkg/scheduler/algorithmprovider:all-srcs", "//pkg/scheduler/api:all-srcs", "//pkg/scheduler/apis/config:all-srcs", + "//pkg/scheduler/apis/extender/v1:all-srcs", "//pkg/scheduler/core:all-srcs", - "//pkg/scheduler/factory:all-srcs", "//pkg/scheduler/framework:all-srcs", "//pkg/scheduler/internal/cache:all-srcs", + "//pkg/scheduler/internal/heap:all-srcs", "//pkg/scheduler/internal/queue:all-srcs", + "//pkg/scheduler/listers:all-srcs", "//pkg/scheduler/metrics:all-srcs", "//pkg/scheduler/nodeinfo:all-srcs", "//pkg/scheduler/testing:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD index 415aa853026..4010474104a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD @@ -11,12 +11,15 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", deps = [ - "//pkg/scheduler/api:go_default_library", + "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD index 5e6c799e5c4..8b8d1a9a685 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD @@ -13,25 +13,21 @@ go_library( "error.go", "metadata.go", "predicates.go", - "testing_helper.go", "utils.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -59,15 +55,16 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go index ad86873897b..d853aa14dc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go @@ -20,41 +20,54 @@ import ( "fmt" v1 "k8s.io/api/core/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/util/rand" - utilfeature "k8s.io/apiserver/pkg/util/feature" - csilib "k8s.io/csi-translation-lib" + corelisters "k8s.io/client-go/listers/core/v1" + storagelisters "k8s.io/client-go/listers/storage/v1" + csitrans "k8s.io/csi-translation-lib" "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/features" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +// InTreeToCSITranslator contains methods required to check migratable status +// and perform translations from InTree PV's to CSI +type InTreeToCSITranslator interface { + IsPVMigratable(pv *v1.PersistentVolume) bool + IsMigratableIntreePluginByName(inTreePluginName string) bool + GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) + GetCSINameFromInTreeName(pluginName string) (string, error) + TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) +} + // CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes type CSIMaxVolumeLimitChecker struct { - csiNodeInfo CSINodeInfo - pvInfo PersistentVolumeInfo - pvcInfo PersistentVolumeClaimInfo - scInfo StorageClassInfo + csiNodeLister storagelisters.CSINodeLister + pvLister corelisters.PersistentVolumeLister + pvcLister corelisters.PersistentVolumeClaimLister + scLister storagelisters.StorageClassLister randomVolumeIDPrefix string + + translator InTreeToCSITranslator } // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes func NewCSIMaxVolumeLimitPredicate( - csiNodeInfo CSINodeInfo, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, scInfo StorageClassInfo) FitPredicate { + csiNodeLister storagelisters.CSINodeLister, 
pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate { c := &CSIMaxVolumeLimitChecker{ - csiNodeInfo: csiNodeInfo, - pvInfo: pvInfo, - pvcInfo: pvcInfo, - scInfo: scInfo, + csiNodeLister: csiNodeLister, + pvLister: pvLister, + pvcLister: pvcLister, + scLister: scLister, randomVolumeIDPrefix: rand.String(32), + translator: csitrans.New(), } return c.attachableLimitPredicate } -func (c *CSIMaxVolumeLimitChecker) getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1beta1.CSINode) map[v1.ResourceName]int64 { +func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 { // TODO: stop getting values from Node object in v1.18 nodeVolumeLimits := nodeInfo.VolumeLimits() if csiNode != nil { @@ -71,23 +84,19 @@ func (c *CSIMaxVolumeLimitChecker) getVolumeLimits(nodeInfo *schedulernodeinfo.N } func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate( - pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { + pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { // If the new pod doesn't have any volume attached to it, the predicate will always be true if len(pod.Spec.Volumes) == 0 { return true, nil, nil } - if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { - return true, nil, nil - } - node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } // If CSINode doesn't exist, the predicate may read the limits from Node object - csiNode, err := c.csiNodeInfo.GetCSINodeInfo(node.Name) + csiNode, err := c.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default (2 releases) klog.V(5).Infof("Could not get a CSINode object for the node: %v", err) @@ -104,7 +113,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate( } // If the node doesn't have volume limits, the predicate will always be true - nodeVolumeLimits := c.getVolumeLimits(nodeInfo, csiNode) + nodeVolumeLimits := getVolumeLimits(nodeInfo, csiNode) if len(nodeVolumeLimits) == 0 { return true, nil, nil } @@ -144,7 +153,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate( } func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes( - csiNode *storagev1beta1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error { + csiNode *storagev1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error { for _, vol := range volumes { // CSI volumes can only be used as persistent volumes if vol.PersistentVolumeClaim == nil { @@ -156,7 +165,7 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes( return fmt.Errorf("PersistentVolumeClaim had no name") } - pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) + pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) if err != nil { klog.V(5).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName) @@ -179,7 +188,7 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes( // getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC. // If the PVC is from a migrated in-tree plugin, this function will return // the information of the CSI driver that the plugin has been migrated to. 
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { +func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { pvName := pvc.Spec.VolumeName namespace := pvc.Namespace pvcName := pvc.Name @@ -189,7 +198,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN return c.getCSIDriverInfoFromSC(csiNode, pvc) } - pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) + pv, err := c.pvLister.Get(pvName) if err != nil { klog.V(5).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName) // If we can't fetch PV associated with PVC, may be it got deleted @@ -201,11 +210,11 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN csiSource := pv.Spec.PersistentVolumeSource.CSI if csiSource == nil { // We make a fast path for non-CSI volumes that aren't migratable - if !csilib.IsPVMigratable(pv) { + if !c.translator.IsPVMigratable(pv) { return "", "" } - pluginName, err := csilib.GetInTreePluginNameFromSpec(pv, nil) + pluginName, err := c.translator.GetInTreePluginNameFromSpec(pv, nil) if err != nil { klog.V(5).Infof("Unable to look up plugin name from PV spec: %v", err) return "", "" @@ -216,7 +225,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN return "", "" } - csiPV, err := csilib.TranslateInTreePVToCSI(pv) + csiPV, err := c.translator.TranslateInTreePVToCSI(pv) if err != nil { klog.V(5).Infof("Unable to translate in-tree volume to CSI: %v", err) return "", "" @@ -234,7 +243,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN } // getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass. 
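A side note on the refactor in this file: the volume-limit checker now receives its CSI translation logic through the small InTreeToCSITranslator interface (satisfied by csitrans.New()) instead of calling csi-translation-lib package functions directly, so a stub can be substituted in tests. The sketch below is illustrative only and not part of this patch; it assumes a test wants every in-tree volume treated as non-migratable.

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// fakeTranslator is a hypothetical stub of the InTreeToCSITranslator
// interface defined earlier in csi_volume_predicate.go. Every method
// reports "not migratable", which keeps the predicate on the plain
// CSI code path.
type fakeTranslator struct{}

func (fakeTranslator) IsPVMigratable(pv *v1.PersistentVolume) bool     { return false }
func (fakeTranslator) IsMigratableIntreePluginByName(name string) bool { return false }

func (fakeTranslator) GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) {
	return "", fmt.Errorf("in-tree translation not supported by this stub")
}

func (fakeTranslator) GetCSINameFromInTreeName(pluginName string) (string, error) {
	return "", fmt.Errorf("in-tree translation not supported by this stub")
}

func (fakeTranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	return nil, fmt.Errorf("in-tree translation not supported by this stub")
}

Since the translator field on CSIMaxVolumeLimitChecker is unexported, a stub like this could only be injected by a test in the same package; the patch itself does not add such a hook.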
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { +func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { namespace := pvc.Namespace pvcName := pvc.Name scName := v1helper.GetPersistentVolumeClaimClass(pvc) @@ -246,7 +255,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta return "", "" } - storageClass, err := c.scInfo.GetStorageClassInfo(scName) + storageClass, err := c.scLister.Get(scName) if err != nil { klog.V(5).Infof("Could not get StorageClass for PVC %s/%s: %v", namespace, pvcName, err) return "", "" @@ -258,13 +267,13 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta volumeHandle := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName) provisioner := storageClass.Provisioner - if csilib.IsMigratableIntreePluginByName(provisioner) { + if c.translator.IsMigratableIntreePluginByName(provisioner) { if !isCSIMigrationOn(csiNode, provisioner) { klog.V(5).Infof("CSI Migration of plugin %s is not enabled", provisioner) return "", "" } - driverName, err := csilib.GetCSINameFromInTreeName(provisioner) + driverName, err := c.translator.GetCSINameFromInTreeName(provisioner) if err != nil { klog.V(5).Infof("Unable to look up driver name from plugin name: %v", err) return "", "" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/error.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/error.go index d5325af3f2e..25378706b8e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/error.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/error.go @@ -82,6 +82,38 @@ var ( ErrFakePredicate = newPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate") ) +var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{ + ErrNodeSelectorNotMatch: {}, + ErrPodAffinityRulesNotMatch: {}, + ErrPodNotMatchHostName: {}, + ErrTaintsTolerationsNotMatch: {}, + ErrNodeLabelPresenceViolated: {}, + // Node conditions won't change when scheduler simulates removal of preemption victims. + // So, it is pointless to try nodes that have not been able to host the pod due to node + // conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, .... + ErrNodeNotReady: {}, + ErrNodeNetworkUnavailable: {}, + ErrNodeUnderDiskPressure: {}, + ErrNodeUnderPIDPressure: {}, + ErrNodeUnderMemoryPressure: {}, + ErrNodeUnschedulable: {}, + ErrNodeUnknownCondition: {}, + ErrVolumeZoneConflict: {}, + ErrVolumeNodeConflict: {}, + ErrVolumeBindConflict: {}, +} + +// UnresolvablePredicateExists checks if there is at least one unresolvable predicate failure reason, if true +// returns the first one in the list. +func UnresolvablePredicateExists(reasons []PredicateFailureReason) PredicateFailureReason { + for _, r := range reasons { + if _, ok := unresolvablePredicateFailureErrors[r]; ok { + return r + } + } + return nil +} + // InsufficientResourceError is an error type that indicates what kind of resource limit is // hit and caused the unfitting failure. 
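The new UnresolvablePredicateExists helper and its backing map let preemption logic skip nodes whose failure reasons cannot be cured by evicting pods. The sketch below shows one way a caller might use it; the function and variable names are illustrative and do not appear in this patch.

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

// filterPreemptionCandidates is a hypothetical helper: it drops nodes whose
// recorded predicate failures are unresolvable (node conditions, selector
// mismatches, taints, ...), since preempting pods there cannot help.
func filterPreemptionCandidates(nodes []*v1.Node, failures map[string][]predicates.PredicateFailureReason) []*v1.Node {
	candidates := make([]*v1.Node, 0, len(nodes))
	for _, node := range nodes {
		if reason := predicates.UnresolvablePredicateExists(failures[node.Name]); reason != nil {
			// Example: ErrNodeUnderMemoryPressure will not go away by removing victims.
			continue
		}
		candidates = append(candidates, node)
	}
	return candidates
}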
type InsufficientResourceError struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go index 3e317f946c8..3f5e37a995b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go @@ -24,31 +24,26 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) -// PredicateMetadata interface represents anything that can access a predicate metadata. -type PredicateMetadata interface { - ShallowCopy() PredicateMetadata - AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error +// Metadata interface represents anything that can access a predicate metadata. +type Metadata interface { + ShallowCopy() Metadata + AddPod(addedPod *v1.Pod, node *v1.Node) error RemovePod(deletedPod *v1.Pod, node *v1.Node) error } -// PredicateMetadataProducer is a function that computes predicate metadata for a given pod. -type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata - -// PredicateMetadataFactory defines a factory of predicate metadata. -type PredicateMetadataFactory struct { - podLister algorithm.PodLister -} +// MetadataProducer is a function that computes predicate metadata for a given pod. +type MetadataProducer func(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata // AntiAffinityTerm's topology key value used in predicate metadata type topologyPair struct { @@ -116,11 +111,12 @@ func (paths *criticalPaths) update(tpVal string, num int32) { } } -// podSpreadCache combines tpKeyToCriticalPaths and tpPairToMatchNum +// evenPodsSpreadMetadata combines tpKeyToCriticalPaths and tpPairToMatchNum // to represent: // (1) critical paths where the least pods are matched on each spread constraint. // (2) number of pods matched on each spread constraint. -type podSpreadCache struct { +type evenPodsSpreadMetadata struct { + constraints []topologySpreadConstraint // We record 2 critical paths instead of all critical paths here. // criticalPaths[0].matchNum always holds the minimum matching number. // criticalPaths[1].matchNum is always greater or equal to criticalPaths[0].matchNum, but @@ -130,14 +126,66 @@ type podSpreadCache struct { tpPairToMatchNum map[topologyPair]int32 } -// NOTE: When new fields are added/removed or logic is changed, please make sure that -// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes. -type predicateMetadata struct { - pod *v1.Pod - podBestEffort bool - podRequest *schedulernodeinfo.Resource - podPorts []*v1.ContainerPort +// topologySpreadConstraint is an internal version for a hard (DoNotSchedule +// unsatisfiable constraint action) v1.TopologySpreadConstraint and where the +// selector is parsed. 
+type topologySpreadConstraint struct { + maxSkew int32 + topologyKey string + selector labels.Selector +} + +type serviceAffinityMetadata struct { + matchingPodList []*v1.Pod + matchingPodServices []*v1.Service +} + +func (m *serviceAffinityMetadata) addPod(addedPod *v1.Pod, pod *v1.Pod, node *v1.Node) { + // If addedPod is in the same namespace as the pod, update the list + // of matching pods if applicable. + if m == nil || addedPod.Namespace != pod.Namespace { + return + } + selector := CreateSelectorFromLabels(pod.Labels) + if selector.Matches(labels.Set(addedPod.Labels)) { + m.matchingPodList = append(m.matchingPodList, addedPod) + } +} + +func (m *serviceAffinityMetadata) removePod(deletedPod *v1.Pod, node *v1.Node) { + deletedPodFullName := schedutil.GetPodFullName(deletedPod) + + if m == nil || + len(m.matchingPodList) == 0 || + deletedPod.Namespace != m.matchingPodList[0].Namespace { + return + } + + for i, pod := range m.matchingPodList { + if schedutil.GetPodFullName(pod) == deletedPodFullName { + m.matchingPodList = append(m.matchingPodList[:i], m.matchingPodList[i+1:]...) + break + } + } +} + +func (m *serviceAffinityMetadata) clone() *serviceAffinityMetadata { + if m == nil { + return nil + } + + copy := serviceAffinityMetadata{} + + copy.matchingPodServices = append([]*v1.Service(nil), + m.matchingPodServices...) + copy.matchingPodList = append([]*v1.Pod(nil), + m.matchingPodList...) + + return © +} + +type podAffinityMetadata struct { topologyPairsAntiAffinityPodsMap *topologyPairsMaps // A map of topology pairs to a list of Pods that can potentially match // the affinity terms of the "pod" and its inverse. @@ -145,9 +193,70 @@ type predicateMetadata struct { // A map of topology pairs to a list of Pods that can potentially match // the anti-affinity terms of the "pod" and its inverse. topologyPairsPotentialAntiAffinityPods *topologyPairsMaps - serviceAffinityInUse bool - serviceAffinityMatchingPodList []*v1.Pod - serviceAffinityMatchingPodServices []*v1.Service +} + +func (m *podAffinityMetadata) addPod(addedPod *v1.Pod, pod *v1.Pod, node *v1.Node) error { + // Add matching anti-affinity terms of the addedPod to the map. + topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, addedPod, node) + if err != nil { + return err + } + m.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) + // Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed. 
+ affinity := pod.Spec.Affinity + podNodeName := addedPod.Spec.NodeName + if affinity != nil && len(podNodeName) > 0 { + // It is assumed that when the added pod matches affinity of the pod, all the terms must match, + // this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties + // is changed + if targetPodMatchesAffinityOfPod(pod, addedPod) { + affinityTerms := GetPodAffinityTerms(affinity.PodAffinity) + for _, term := range affinityTerms { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey, value: topologyValue} + m.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod) + } + } + } + if targetPodMatchesAntiAffinityOfPod(pod, addedPod) { + antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity) + for _, term := range antiAffinityTerms { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey, value: topologyValue} + m.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod) + } + } + } + } + + return nil +} + +func (m *podAffinityMetadata) removePod(deletedPod *v1.Pod) { + if m == nil { + return + } + + m.topologyPairsAntiAffinityPodsMap.removePod(deletedPod) + // Delete pod from the matching affinity or anti-affinity topology pairs maps. + m.topologyPairsPotentialAffinityPods.removePod(deletedPod) + m.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod) +} + +func (m *podAffinityMetadata) clone() *podAffinityMetadata { + if m == nil { + return nil + } + + copy := podAffinityMetadata{} + copy.topologyPairsPotentialAffinityPods = m.topologyPairsPotentialAffinityPods.clone() + copy.topologyPairsPotentialAntiAffinityPods = m.topologyPairsPotentialAntiAffinityPods.clone() + copy.topologyPairsAntiAffinityPodsMap = m.topologyPairsAntiAffinityPodsMap.clone() + + return © +} + +type podFitsResourcesMetadata struct { // ignoredExtendedResources is a set of extended resource names that will // be ignored in the PodFitsResources predicate. // @@ -155,84 +264,129 @@ type predicateMetadata struct { // which should be accounted only by the extenders. This set is synthesized // from scheduler extender configuration and does not change per pod. ignoredExtendedResources sets.String - // podSpreadCache holds info of the minimum match number on each topology spread constraint, + podRequest *schedulernodeinfo.Resource +} + +func (m *podFitsResourcesMetadata) clone() *podFitsResourcesMetadata { + if m == nil { + return nil + } + + copy := podFitsResourcesMetadata{} + copy.ignoredExtendedResources = m.ignoredExtendedResources + copy.podRequest = m.podRequest + + return © +} + +type podFitsHostPortsMetadata struct { + podPorts []*v1.ContainerPort +} + +func (m *podFitsHostPortsMetadata) clone() *podFitsHostPortsMetadata { + if m == nil { + return nil + } + + copy := podFitsHostPortsMetadata{} + copy.podPorts = append([]*v1.ContainerPort(nil), m.podPorts...) + + return © +} + +// NOTE: When new fields are added/removed or logic is changed, please make sure that +// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes. +type predicateMetadata struct { + pod *v1.Pod + podBestEffort bool + + // evenPodsSpreadMetadata holds info of the minimum match number on each topology spread constraint, // and the match number of all valid topology pairs. 
- podSpreadCache *podSpreadCache + evenPodsSpreadMetadata *evenPodsSpreadMetadata + + serviceAffinityMetadata *serviceAffinityMetadata + podAffinityMetadata *podAffinityMetadata + podFitsResourcesMetadata *podFitsResourcesMetadata + podFitsHostPortsMetadata *podFitsHostPortsMetadata } -// Ensure that predicateMetadata implements algorithm.PredicateMetadata. -var _ PredicateMetadata = &predicateMetadata{} +// Ensure that predicateMetadata implements algorithm.Metadata. +var _ Metadata = &predicateMetadata{} // predicateMetadataProducer function produces predicate metadata. It is stored in a global variable below -// and used to modify the return values of PredicateMetadataProducer +// and used to modify the return values of MetadataProducer type predicateMetadataProducer func(pm *predicateMetadata) var predicateMetadataProducers = make(map[string]predicateMetadataProducer) -// RegisterPredicateMetadataProducer registers a PredicateMetadataProducer. +// RegisterPredicateMetadataProducer registers a MetadataProducer. func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) { predicateMetadataProducers[predicateName] = precomp } -// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type. -func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata { +// EmptyMetadataProducer returns a no-op MetadataProducer type. +func EmptyMetadataProducer(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata { return nil } // RegisterPredicateMetadataProducerWithExtendedResourceOptions registers a -// PredicateMetadataProducer that creates predicate metadata with the provided +// MetadataProducer that creates predicate metadata with the provided // options for extended resources. // // See the comments in "predicateMetadata" for the explanation of the options. func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtendedResources sets.String) { RegisterPredicateMetadataProducer("PredicateWithExtendedResourceOptions", func(pm *predicateMetadata) { - pm.ignoredExtendedResources = ignoredExtendedResources + pm.podFitsResourcesMetadata.ignoredExtendedResources = ignoredExtendedResources }) } -// NewPredicateMetadataFactory creates a PredicateMetadataFactory. -func NewPredicateMetadataFactory(podLister algorithm.PodLister) PredicateMetadataProducer { - factory := &PredicateMetadataFactory{ - podLister, - } - return factory.GetMetadata -} +// MetadataProducerFactory is a factory to produce Metadata. +type MetadataProducerFactory struct{} -// GetMetadata returns the predicateMetadata used which will be used by various predicates. -func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata { +// GetPredicateMetadata returns the predicateMetadata which will be used by various predicates. 
+func (f *MetadataProducerFactory) GetPredicateMetadata(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata { // If we cannot compute metadata, just return nil if pod == nil { return nil } - // existingPodSpreadCache represents how existing pods match "pod" + + var allNodes []*schedulernodeinfo.NodeInfo + var havePodsWithAffinityNodes []*schedulernodeinfo.NodeInfo + if sharedLister != nil { + var err error + allNodes, err = sharedLister.NodeInfos().List() + if err != nil { + klog.Errorf("failed to list NodeInfos: %v", err) + return nil + } + havePodsWithAffinityNodes, err = sharedLister.NodeInfos().HavePodsWithAffinityList() + if err != nil { + klog.Errorf("failed to list NodeInfos: %v", err) + return nil + } + + } + + // evenPodsSpreadMetadata represents how existing pods match "pod" // on its spread constraints - existingPodSpreadCache, err := getExistingPodSpreadCache(pod, nodeNameToInfoMap) + evenPodsSpreadMetadata, err := getEvenPodsSpreadMetadata(pod, allNodes) if err != nil { klog.Errorf("Error calculating spreadConstraintsMap: %v", err) return nil } - // existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity - existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap) - if err != nil { - klog.Errorf("Error calculating existingPodAntiAffinityMap: %v", err) - return nil - } - // incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity - // incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity - incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap) + + podAffinityMetadata, err := getPodAffinityMetadata(pod, allNodes, havePodsWithAffinityNodes) if err != nil { - klog.Errorf("Error calculating incomingPod(Anti)AffinityMap: %v", err) + klog.Errorf("Error calculating podAffinityMetadata: %v", err) return nil } + predicateMetadata := &predicateMetadata{ - pod: pod, - podBestEffort: isPodBestEffort(pod), - podRequest: GetResourceRequest(pod), - podPorts: schedutil.GetContainerPorts(pod), - topologyPairsPotentialAffinityPods: incomingPodAffinityMap, - topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap, - topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap, - podSpreadCache: existingPodSpreadCache, + pod: pod, + evenPodsSpreadMetadata: evenPodsSpreadMetadata, + podAffinityMetadata: podAffinityMetadata, + podFitsResourcesMetadata: getPodFitsResourcesMetedata(pod), + podFitsHostPortsMetadata: getPodFitsHostPortsMetadata(pod), } for predicateName, precomputeFunc := range predicateMetadataProducers { klog.V(10).Infof("Precompute: %v", predicateName) @@ -241,25 +395,55 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf return predicateMetadata } -func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*podSpreadCache, error) { +func getPodFitsHostPortsMetadata(pod *v1.Pod) *podFitsHostPortsMetadata { + return &podFitsHostPortsMetadata{ + podPorts: schedutil.GetContainerPorts(pod), + } +} + +func getPodFitsResourcesMetedata(pod *v1.Pod) *podFitsResourcesMetadata { + return &podFitsResourcesMetadata{ + podRequest: GetResourceRequest(pod), + } +} + +func getPodAffinityMetadata(pod *v1.Pod, allNodes []*schedulernodeinfo.NodeInfo, havePodsWithAffinityNodes []*schedulernodeinfo.NodeInfo) (*podAffinityMetadata, error) { + // existingPodAntiAffinityMap will be used 
later for efficient check on existing pods' anti-affinity + existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, havePodsWithAffinityNodes) + if err != nil { + return nil, err + } + // incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity + // incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity + incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, allNodes) + if err != nil { + return nil, err + } + + return &podAffinityMetadata{ + topologyPairsPotentialAffinityPods: incomingPodAffinityMap, + topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap, + topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap, + }, nil +} + +func getEvenPodsSpreadMetadata(pod *v1.Pod, allNodes []*schedulernodeinfo.NodeInfo) (*evenPodsSpreadMetadata, error) { // We have feature gating in APIServer to strip the spec // so don't need to re-check feature gate, just check length of constraints. - constraints := getHardTopologySpreadConstraints(pod) + constraints, err := filterHardTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints) + if err != nil { + return nil, err + } if len(constraints) == 0 { return nil, nil } - allNodeNames := make([]string, 0, len(nodeInfoMap)) - for name := range nodeInfoMap { - allNodeNames = append(allNodeNames, name) - } - - errCh := schedutil.NewErrorChannel() var lock sync.Mutex // TODO(Huang-Wei): It might be possible to use "make(map[topologyPair]*int32)". // In that case, need to consider how to init each tpPairToCount[pair] in an atomic fashion. - m := podSpreadCache{ + m := evenPodsSpreadMetadata{ + constraints: constraints, tpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)), tpPairToMatchNum: make(map[topologyPair]int32), } @@ -269,13 +453,11 @@ func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernod lock.Unlock() } - ctx, cancel := context.WithCancel(context.Background()) - processNode := func(i int) { - nodeInfo := nodeInfoMap[allNodeNames[i]] + nodeInfo := allNodes[i] node := nodeInfo.Node() if node == nil { - klog.Errorf("node %q not found", allNodeNames[i]) + klog.Error("node not found") return } // In accordance to design, if NodeAffinity or NodeSelector is defined, @@ -295,28 +477,19 @@ func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernod if existingPod.Namespace != pod.Namespace { continue } - ok, err := PodMatchesSpreadConstraint(existingPod.Labels, constraint) - if err != nil { - errCh.SendErrorWithCancel(err, cancel) - return - } - if ok { + if constraint.selector.Matches(labels.Set(existingPod.Labels)) { matchTotal++ } } - pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]} + pair := topologyPair{key: constraint.topologyKey, value: node.Labels[constraint.topologyKey]} addTopologyPairMatchNum(pair, matchTotal) } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode) - - if err := errCh.ReceiveError(); err != nil { - return nil, err - } + workqueue.ParallelizeUntil(context.Background(), 16, len(allNodes), processNode) // calculate min match for each topology pair for i := 0; i < len(constraints); i++ { - key := constraints[i].TopologyKey + key := constraints[i].topologyKey m.tpKeyToCriticalPaths[key] = newCriticalPaths() } for pair, num := range m.tpPairToMatchNum { @@ -326,36 +499,28 @@ func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap 
map[string]*schedulernod return &m, nil } -func getHardTopologySpreadConstraints(pod *v1.Pod) (constraints []v1.TopologySpreadConstraint) { - if pod != nil { - for _, constraint := range pod.Spec.TopologySpreadConstraints { - if constraint.WhenUnsatisfiable == v1.DoNotSchedule { - constraints = append(constraints, constraint) +func filterHardTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint) ([]topologySpreadConstraint, error) { + var result []topologySpreadConstraint + for _, c := range constraints { + if c.WhenUnsatisfiable == v1.DoNotSchedule { + selector, err := metav1.LabelSelectorAsSelector(c.LabelSelector) + if err != nil { + return nil, err } + result = append(result, topologySpreadConstraint{ + maxSkew: c.MaxSkew, + topologyKey: c.TopologyKey, + selector: selector, + }) } } - return -} - -// PodMatchesSpreadConstraint verifies if matches . -// Some corner cases: -// 1. podLabelSet = nil => returns (false, nil) -// 2. constraint.LabelSelector = nil => returns (false, nil) -func PodMatchesSpreadConstraint(podLabelSet labels.Set, constraint v1.TopologySpreadConstraint) (bool, error) { - selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector) - if err != nil { - return false, err - } - if !selector.Matches(podLabelSet) { - return false, nil - } - return true, nil + return result, nil } // NodeLabelsMatchSpreadConstraints checks if ALL topology keys in spread constraints are present in node labels. -func NodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []v1.TopologySpreadConstraint) bool { - for _, constraint := range constraints { - if _, ok := nodeLabels[constraint.TopologyKey]; !ok { +func NodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []topologySpreadConstraint) bool { + for _, c := range constraints { + if _, ok := nodeLabels[c.topologyKey]; !ok { return false } } @@ -364,8 +529,10 @@ func NodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints // returns a pointer to a new topologyPairsMaps func newTopologyPairsMaps() *topologyPairsMaps { - return &topologyPairsMaps{topologyPairToPods: make(map[topologyPair]podSet), - podToTopologyPairs: make(map[string]topologyPairSet)} + return &topologyPairsMaps{ + topologyPairToPods: make(map[topologyPair]podSet), + podToTopologyPairs: make(map[string]topologyPairSet), + } } func (m *topologyPairsMaps) addTopologyPair(pair topologyPair, pod *v1.Pod) { @@ -408,57 +575,55 @@ func (m *topologyPairsMaps) clone() *topologyPairsMaps { return copy } -func (c *podSpreadCache) addPod(addedPod, preemptorPod *v1.Pod, node *v1.Node) error { - return c.updatePod(addedPod, preemptorPod, node, 1) +func (m *evenPodsSpreadMetadata) addPod(addedPod, preemptorPod *v1.Pod, node *v1.Node) { + m.updatePod(addedPod, preemptorPod, node, 1) } -func (c *podSpreadCache) removePod(deletedPod, preemptorPod *v1.Pod, node *v1.Node) { - c.updatePod(deletedPod, preemptorPod, node, -1) +func (m *evenPodsSpreadMetadata) removePod(deletedPod, preemptorPod *v1.Pod, node *v1.Node) { + m.updatePod(deletedPod, preemptorPod, node, -1) } -func (c *podSpreadCache) updatePod(updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int32) error { - if updatedPod.Namespace != preemptorPod.Namespace || node == nil { - return nil +func (m *evenPodsSpreadMetadata) updatePod(updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int32) { + if m == nil || updatedPod.Namespace != preemptorPod.Namespace || node == nil { + return } - constraints := 
getHardTopologySpreadConstraints(preemptorPod) - if !NodeLabelsMatchSpreadConstraints(node.Labels, constraints) { - return nil + if !NodeLabelsMatchSpreadConstraints(node.Labels, m.constraints) { + return } podLabelSet := labels.Set(updatedPod.Labels) - for _, constraint := range constraints { - if match, err := PodMatchesSpreadConstraint(podLabelSet, constraint); err != nil { - return err - } else if !match { + for _, constraint := range m.constraints { + if !constraint.selector.Matches(podLabelSet) { continue } - k, v := constraint.TopologyKey, node.Labels[constraint.TopologyKey] + k, v := constraint.topologyKey, node.Labels[constraint.topologyKey] pair := topologyPair{key: k, value: v} - c.tpPairToMatchNum[pair] = c.tpPairToMatchNum[pair] + delta + m.tpPairToMatchNum[pair] = m.tpPairToMatchNum[pair] + delta - c.tpKeyToCriticalPaths[k].update(v, c.tpPairToMatchNum[pair]) + m.tpKeyToCriticalPaths[k].update(v, m.tpPairToMatchNum[pair]) } - return nil } -func (c *podSpreadCache) clone() *podSpreadCache { +func (m *evenPodsSpreadMetadata) clone() *evenPodsSpreadMetadata { // c could be nil when EvenPodsSpread feature is disabled - if c == nil { + if m == nil { return nil } - copy := podSpreadCache{ - tpKeyToCriticalPaths: make(map[string]*criticalPaths), - tpPairToMatchNum: make(map[topologyPair]int32), + cp := evenPodsSpreadMetadata{ + // constraints are shared because they don't change. + constraints: m.constraints, + tpKeyToCriticalPaths: make(map[string]*criticalPaths, len(m.tpKeyToCriticalPaths)), + tpPairToMatchNum: make(map[topologyPair]int32, len(m.tpPairToMatchNum)), } - for tpKey, paths := range c.tpKeyToCriticalPaths { - copy.tpKeyToCriticalPaths[tpKey] = &criticalPaths{paths[0], paths[1]} + for tpKey, paths := range m.tpKeyToCriticalPaths { + cp.tpKeyToCriticalPaths[tpKey] = &criticalPaths{paths[0], paths[1]} } - for tpPair, matchNum := range c.tpPairToMatchNum { + for tpPair, matchNum := range m.tpPairToMatchNum { copyPair := topologyPair{key: tpPair.key, value: tpPair.value} - copy.tpPairToMatchNum[copyPair] = matchNum + cp.tpPairToMatchNum[copyPair] = matchNum } - return © + return &cp } // RemovePod changes predicateMetadata assuming that the given `deletedPod` is @@ -468,111 +633,49 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) erro if deletedPodFullName == schedutil.GetPodFullName(meta.pod) { return fmt.Errorf("deletedPod and meta.pod must not be the same") } - meta.topologyPairsAntiAffinityPodsMap.removePod(deletedPod) - // Delete pod from the matching affinity or anti-affinity topology pairs maps. - meta.topologyPairsPotentialAffinityPods.removePod(deletedPod) - meta.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod) - // Delete pod from the pod spread topology maps. - meta.podSpreadCache.removePod(deletedPod, meta.pod, node) - // All pods in the serviceAffinityMatchingPodList are in the same namespace. - // So, if the namespace of the first one is not the same as the namespace of the - // deletedPod, we don't need to check the list, as deletedPod isn't in the list. - if meta.serviceAffinityInUse && - len(meta.serviceAffinityMatchingPodList) > 0 && - deletedPod.Namespace == meta.serviceAffinityMatchingPodList[0].Namespace { - for i, pod := range meta.serviceAffinityMatchingPodList { - if schedutil.GetPodFullName(pod) == deletedPodFullName { - meta.serviceAffinityMatchingPodList = append( - meta.serviceAffinityMatchingPodList[:i], - meta.serviceAffinityMatchingPodList[i+1:]...) 
- break - } - } - } + meta.podAffinityMetadata.removePod(deletedPod) + meta.evenPodsSpreadMetadata.removePod(deletedPod, meta.pod, node) + meta.serviceAffinityMetadata.removePod(deletedPod, node) + return nil } -// AddPod changes predicateMetadata assuming that `newPod` is added to the +// AddPod changes predicateMetadata assuming that the given `addedPod` is added to the // system. -func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error { +func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, node *v1.Node) error { addedPodFullName := schedutil.GetPodFullName(addedPod) if addedPodFullName == schedutil.GetPodFullName(meta.pod) { return fmt.Errorf("addedPod and meta.pod must not be the same") } - if nodeInfo.Node() == nil { - return fmt.Errorf("invalid node in nodeInfo") + if node == nil { + return fmt.Errorf("node not found") } - // Add matching anti-affinity terms of the addedPod to the map. - topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, nodeInfo.Node()) - if err != nil { + + if err := meta.podAffinityMetadata.addPod(addedPod, meta.pod, node); err != nil { return err } - meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) - // Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed. - affinity := meta.pod.Spec.Affinity - podNodeName := addedPod.Spec.NodeName - if affinity != nil && len(podNodeName) > 0 { - podNode := nodeInfo.Node() - // It is assumed that when the added pod matches affinity of the meta.pod, all the terms must match, - // this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties - // is changed - if targetPodMatchesAffinityOfPod(meta.pod, addedPod) { - affinityTerms := GetPodAffinityTerms(affinity.PodAffinity) - for _, term := range affinityTerms { - if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - meta.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod) - } - } - } - if targetPodMatchesAntiAffinityOfPod(meta.pod, addedPod) { - antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity) - for _, term := range antiAffinityTerms { - if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - meta.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod) - } - } - } - } - // Update meta.podSpreadCache if meta.pod has hard spread constraints + // Update meta.evenPodsSpreadMetadata if meta.pod has hard spread constraints // and addedPod matches that - if err := meta.podSpreadCache.addPod(addedPod, meta.pod, nodeInfo.Node()); err != nil { - return err - } + meta.evenPodsSpreadMetadata.addPod(addedPod, meta.pod, node) + + meta.serviceAffinityMetadata.addPod(addedPod, meta.pod, node) - // If addedPod is in the same namespace as the meta.pod, update the list - // of matching pods if applicable. - if meta.serviceAffinityInUse && addedPod.Namespace == meta.pod.Namespace { - selector := CreateSelectorFromLabels(meta.pod.Labels) - if selector.Matches(labels.Set(addedPod.Labels)) { - meta.serviceAffinityMatchingPodList = append(meta.serviceAffinityMatchingPodList, - addedPod) - } - } return nil } // ShallowCopy copies a metadata struct into a new struct and creates a copy of // its maps and slices, but it does not copy the contents of pointer values. 
-func (meta *predicateMetadata) ShallowCopy() PredicateMetadata { +func (meta *predicateMetadata) ShallowCopy() Metadata { newPredMeta := &predicateMetadata{ - pod: meta.pod, - podBestEffort: meta.podBestEffort, - podRequest: meta.podRequest, - serviceAffinityInUse: meta.serviceAffinityInUse, - ignoredExtendedResources: meta.ignoredExtendedResources, - } - newPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...) - newPredMeta.topologyPairsPotentialAffinityPods = meta.topologyPairsPotentialAffinityPods.clone() - newPredMeta.topologyPairsPotentialAntiAffinityPods = meta.topologyPairsPotentialAntiAffinityPods.clone() - newPredMeta.topologyPairsAntiAffinityPodsMap = meta.topologyPairsAntiAffinityPodsMap.clone() - newPredMeta.podSpreadCache = meta.podSpreadCache.clone() - newPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil), - meta.serviceAffinityMatchingPodServices...) - newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil), - meta.serviceAffinityMatchingPodList...) - return (PredicateMetadata)(newPredMeta) + pod: meta.pod, + podBestEffort: meta.podBestEffort, + } + newPredMeta.podFitsHostPortsMetadata = meta.podFitsHostPortsMetadata.clone() + newPredMeta.podAffinityMetadata = meta.podAffinityMetadata.clone() + newPredMeta.evenPodsSpreadMetadata = meta.evenPodsSpreadMetadata.clone() + newPredMeta.serviceAffinityMetadata = meta.serviceAffinityMetadata.clone() + newPredMeta.podFitsResourcesMetadata = meta.podFitsResourcesMetadata.clone() + return (Metadata)(newPredMeta) } type affinityTermProperties struct { @@ -627,12 +730,7 @@ func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTerm // getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node: // (1) Whether it has PodAntiAffinity // (2) Whether any AffinityTerm matches the incoming pod -func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*topologyPairsMaps, error) { - allNodeNames := make([]string, 0, len(nodeInfoMap)) - for name := range nodeInfoMap { - allNodeNames = append(allNodeNames, name) - } - +func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*schedulernodeinfo.NodeInfo) (*topologyPairsMaps, error) { errCh := schedutil.NewErrorChannel() var lock sync.Mutex topologyMaps := newTopologyPairsMaps() @@ -646,10 +744,10 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s ctx, cancel := context.WithCancel(context.Background()) processNode := func(i int) { - nodeInfo := nodeInfoMap[allNodeNames[i]] + nodeInfo := allNodes[i] node := nodeInfo.Node() if node == nil { - klog.Errorf("node %q not found", allNodeNames[i]) + klog.Error("node not found") return } for _, existingPod := range nodeInfo.PodsWithAffinity() { @@ -658,10 +756,12 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s errCh.SendErrorWithCancel(err, cancel) return } - appendTopologyPairsMaps(existingPodTopologyMaps) + if existingPodTopologyMaps != nil { + appendTopologyPairsMaps(existingPodTopologyMaps) + } } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode) + workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode) if err := errCh.ReceiveError(); err != nil { return nil, err @@ -674,19 +774,13 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s // It returns a topologyPairsMaps that are checked later by the affinity // predicate. 
With this topologyPairsMaps available, the affinity predicate does not // need to check all the pods in the cluster. -func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { +func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*schedulernodeinfo.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return newTopologyPairsMaps(), newTopologyPairsMaps(), nil } - allNodeNames := make([]string, 0, len(nodeInfoMap)) - for name := range nodeInfoMap { - allNodeNames = append(allNodeNames, name) - } - errCh := schedutil.NewErrorChannel() - var lock sync.Mutex topologyPairsAffinityPodsMaps = newTopologyPairsMaps() topologyPairsAntiAffinityPodsMaps = newTopologyPairsMaps() @@ -712,10 +806,10 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s ctx, cancel := context.WithCancel(context.Background()) processNode := func(i int) { - nodeInfo := nodeInfoMap[allNodeNames[i]] + nodeInfo := allNodes[i] node := nodeInfo.Node() if node == nil { - klog.Errorf("node %q not found", allNodeNames[i]) + klog.Error("node not found") return } nodeTopologyPairsAffinityPodsMaps := newTopologyPairsMaps() @@ -751,7 +845,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps) } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode) + workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode) if err := errCh.ReceiveError(); err != nil { return nil, nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go index a53b979136c..592dcbb6aab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go @@ -26,9 +26,7 @@ import ( "k8s.io/klog" v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -40,11 +38,10 @@ import ( volumehelpers "k8s.io/cloud-provider/volume/helpers" csilibplugins "k8s.io/csi-translation-lib/plugins" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" @@ -56,8 +53,6 @@ const ( MatchInterPodAffinityPred = "MatchInterPodAffinity" // CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding. 
CheckVolumeBindingPred = "CheckVolumeBinding" - // CheckNodeConditionPred defines the name of predicate CheckNodeCondition. - CheckNodeConditionPred = "CheckNodeCondition" // GeneralPred defines the name of predicate GeneralPredicates. GeneralPred = "GeneralPredicates" // HostNamePred defines the name of predicate HostName. @@ -96,28 +91,22 @@ const ( // DEPRECATED // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred. MaxCinderVolumeCountPred = "MaxCinderVolumeCount" - // MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached + // MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached. MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred" // NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict. NoVolumeZoneConflictPred = "NoVolumeZoneConflict" - // CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure. - CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure" - // CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure. - CheckNodeDiskPressurePred = "CheckNodeDiskPressure" - // CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure. - CheckNodePIDPressurePred = "CheckNodePIDPressure" - // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread + // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread. EvenPodsSpreadPred = "EvenPodsSpread" - // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE + // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE. // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 - // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure + // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure. // Larger Azure VMs can actually have much more disks attached. // TODO We should determine the max based on VM size DefaultMaxAzureDiskVolumes = 16 - // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet + // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet. KubeMaxPDVols = "KUBE_MAX_PD_VOLS" // EBSVolumeFilterType defines the filter name for EBSVolumeFilter. @@ -144,79 +133,23 @@ const ( // The order is based on the restrictiveness & complexity of predicates. // Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md var ( - predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred, + predicatesOrdering = []string{CheckNodeUnschedulablePred, GeneralPred, HostNamePred, PodFitsHostPortsPred, MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred, PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred, CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred, MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred, - CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, EvenPodsSpreadPred, MatchInterPodAffinityPred} + EvenPodsSpreadPred, MatchInterPodAffinityPred} ) -// FitPredicate is a function that indicates if a pod fits into an existing node. -// The failure information is given by the error. 
-type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) - -// NodeInfo interface represents anything that can get node object from node ID. -type NodeInfo interface { - GetNodeInfo(nodeID string) (*v1.Node, error) -} - -// CSINodeInfo interface represents anything that can get CSINode object from node ID. -type CSINodeInfo interface { - GetCSINodeInfo(nodeName string) (*storagev1beta1.CSINode, error) -} - -// PersistentVolumeInfo interface represents anything that can get persistent volume object by PV ID. -type PersistentVolumeInfo interface { - GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) -} - -// CachedPersistentVolumeInfo implements PersistentVolumeInfo -type CachedPersistentVolumeInfo struct { - corelisters.PersistentVolumeLister -} - // Ordering returns the ordering of predicates. func Ordering() []string { return predicatesOrdering } -// GetPersistentVolumeInfo returns a persistent volume object by PV ID. -func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { - return c.Get(pvID) -} - -// PersistentVolumeClaimInfo interface represents anything that can get a PVC object in -// specified namespace with specified name. -type PersistentVolumeClaimInfo interface { - GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) -} - -// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo -type CachedPersistentVolumeClaimInfo struct { - corelisters.PersistentVolumeClaimLister -} - -// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name -func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) { - return c.PersistentVolumeClaims(namespace).Get(name) -} - -// StorageClassInfo interface represents anything that can get a storage class object by class name. -type StorageClassInfo interface { - GetStorageClassInfo(className string) (*storagev1.StorageClass, error) -} - -// CachedStorageClassInfo implements StorageClassInfo -type CachedStorageClassInfo struct { - storagelisters.StorageClassLister -} - -// GetStorageClassInfo get StorageClass by class name. -func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) { - return c.Get(className) -} +// FitPredicate is a function that indicates if a pod fits into an existing node. +// The failure information is given by the error. +type FitPredicate func(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // fast path if there is no conflict checking targets. @@ -274,7 +207,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only // - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only // TODO: migrate this into some per-volume specific code? 
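Reviewer note: the deleted NodeInfo, CSINodeInfo, PersistentVolume(Claim)Info and StorageClassInfo wrappers are replaced throughout this file by plain client-go and scheduler listers, queried directly (for example pvcLister.PersistentVolumeClaims(namespace).Get(name)). A sketch of how such a lister can be wired against a fake clientset for local experimentation; the object names are invented and the informer plumbing is standard client-go, not part of this patch:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        // Fake clientset pre-loaded with one PVC; the informer factory feeds the
        // lister cache that the predicates now read from.
        client := fake.NewSimpleClientset(&v1.PersistentVolumeClaim{
            ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
        })
        factory := informers.NewSharedInformerFactory(client, 0)
        pvcLister := factory.Core().V1().PersistentVolumeClaims().Lister()

        stopCh := make(chan struct{})
        defer close(stopCh)
        factory.Start(stopCh)
        factory.WaitForCacheSync(stopCh)

        // Same call shape as the updated predicate code.
        pvc, err := pvcLister.PersistentVolumeClaims("default").Get("data")
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("found claim:", pvc.Name)
    }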
-func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func NoDiskConflict(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { for _, v := range pod.Spec.Volumes { for _, ev := range nodeInfo.Pods() { if isVolumeConflict(v, ev) { @@ -290,10 +223,10 @@ type MaxPDVolumeCountChecker struct { filter VolumeFilter volumeLimitKey v1.ResourceName maxVolumeFunc func(node *v1.Node) int - csiNodeInfo CSINodeInfo - pvInfo PersistentVolumeInfo - pvcInfo PersistentVolumeClaimInfo - scInfo StorageClassInfo + csiNodeLister storagelisters.CSINodeLister + pvLister corelisters.PersistentVolumeLister + pvcLister corelisters.PersistentVolumeClaimLister + scLister storagelisters.StorageClassLister // The string below is generated randomly during the struct's initialization. // It is used to prefix volumeID generated inside the predicate() method to @@ -301,15 +234,15 @@ type MaxPDVolumeCountChecker struct { randomVolumeIDPrefix string } -// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps +// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps. type VolumeFilter struct { // Filter normal volumes FilterVolume func(vol *v1.Volume) (id string, relevant bool) FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate - MatchProvisioner func(sc *storagev1.StorageClass) (relevant bool) + MatchProvisioner func(sc *storage.StorageClass) (relevant bool) // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver - IsMigrated func(csiNode *storagev1beta1.CSINode) bool + IsMigrated func(csiNode *storage.CSINode) bool } // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the @@ -322,8 +255,8 @@ type VolumeFilter struct { // The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over // the maximum. 
-func NewMaxPDVolumeCountPredicate(filterName string, csiNodeInfo CSINodeInfo, scInfo StorageClassInfo, - pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate { +func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister, + pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister) FitPredicate { var filter VolumeFilter var volumeLimitKey v1.ResourceName @@ -351,10 +284,10 @@ func NewMaxPDVolumeCountPredicate(filterName string, csiNodeInfo CSINodeInfo, sc filter: filter, volumeLimitKey: volumeLimitKey, maxVolumeFunc: getMaxVolumeFunc(filterName), - csiNodeInfo: csiNodeInfo, - pvInfo: pvInfo, - pvcInfo: pvcInfo, - scInfo: scInfo, + csiNodeLister: csiNodeLister, + pvLister: pvLister, + pvcLister: pvcLister, + scLister: scLister, randomVolumeIDPrefix: rand.String(32), } @@ -370,8 +303,9 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { var nodeInstanceType string for k, v := range node.ObjectMeta.Labels { - if k == v1.LabelInstanceType { + if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable { nodeInstanceType = v + break } } switch filterName { @@ -396,13 +330,13 @@ func getMaxEBSVolume(nodeInstanceType string) int { return volumeutil.DefaultMaxEBSVolumes } -// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value +// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value. func getMaxVolLimitFromEnv() int { if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err) } else if parsedMaxVols <= 0 { - klog.Errorf("Maximum PD volumes must be a positive value, using default ") + klog.Errorf("Maximum PD volumes must be a positive value, using default") } else { return parsedMaxVols } @@ -427,7 +361,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s // to avoid conflicts with existing volume IDs. pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName) - pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) + pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) if err != nil || pvc == nil { // If the PVC is invalid, we don't count the volume because // there's no guarantee that it belongs to the running predicate. @@ -448,7 +382,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s continue } - pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) + pv, err := c.pvLister.Get(pvName) if err != nil || pv == nil { // If the PV is invalid and PVC belongs to the running predicate, // log the error and count the PV towards the PV limit. 
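Reviewer note: the env-override and instance-type lookups touched above are small enough to restate in isolation: the KUBE_MAX_PD_VOLS override is honoured only when it parses to a positive integer, and either the legacy or the stable instance-type label is accepted. A self-contained sketch (the default of 39 is assumed here purely for illustration; the real defaults live in the volume util package):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    const (
        kubeMaxPDVols         = "KUBE_MAX_PD_VOLS"
        assumedDefaultVolumes = 39 // assumed default for this sketch only
    )

    // maxVolumeLimitFromEnv mirrors getMaxVolLimitFromEnv: honour the env override
    // only when it parses to a positive integer, otherwise keep the default.
    func maxVolumeLimitFromEnv(def int) int {
        raw := os.Getenv(kubeMaxPDVols)
        if raw == "" {
            return def
        }
        parsed, err := strconv.Atoi(raw)
        if err != nil || parsed <= 0 {
            fmt.Printf("ignoring %s=%q, using default %d\n", kubeMaxPDVols, raw, def)
            return def
        }
        return parsed
    }

    // instanceTypeOf mirrors the label lookup in getMaxVolumeFunc: accept either the
    // legacy or the stable instance-type label (the values of v1.LabelInstanceType
    // and v1.LabelInstanceTypeStable), whichever is present first.
    func instanceTypeOf(nodeLabels map[string]string) string {
        for _, key := range []string{"beta.kubernetes.io/instance-type", "node.kubernetes.io/instance-type"} {
            if v, ok := nodeLabels[key]; ok {
                return v
            }
        }
        return ""
    }

    func main() {
        labels := map[string]string{"node.kubernetes.io/instance-type": "m5.large"}
        fmt.Println(instanceTypeOf(labels), maxVolumeLimitFromEnv(assumedDefaultVolumes))
    }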
@@ -474,7 +408,7 @@ func (c *MaxPDVolumeCountChecker) matchProvisioner(pvc *v1.PersistentVolumeClaim return false } - storageClass, err := c.scInfo.GetStorageClassInfo(*pvc.Spec.StorageClassName) + storageClass, err := c.scLister.Get(*pvc.Spec.StorageClassName) if err != nil || storageClass == nil { return false } @@ -482,7 +416,7 @@ func (c *MaxPDVolumeCountChecker) matchProvisioner(pvc *v1.PersistentVolumeClaim return c.filter.MatchProvisioner(storageClass) } -func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { @@ -504,11 +438,17 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, return false, nil, fmt.Errorf("node not found") } - csiNode, err := c.csiNodeInfo.GetCSINodeInfo(node.Name) - if err != nil { - // we don't fail here because the CSINode object is only necessary - // for determining whether the migration is enabled or not - klog.V(5).Infof("Could not get a CSINode object for the node: %v", err) + var ( + csiNode *storage.CSINode + err error + ) + if c.csiNodeLister != nil { + csiNode, err = c.csiNodeLister.Get(node.Name) + if err != nil { + // we don't fail here because the CSINode object is only necessary + // for determining whether the migration is enabled or not + klog.V(5).Infof("Could not get a CSINode object for the node: %v", err) + } } // if a plugin has been migrated to a CSI driver, defer to the CSI predicate @@ -535,11 +475,9 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, numNewVolumes := len(newVolumes) maxAttachLimit := c.maxVolumeFunc(node) - if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { - volumeLimits := nodeInfo.VolumeLimits() - if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok { - maxAttachLimit = int(maxAttachLimitFromAllocatable) - } + volumeLimits := nodeInfo.VolumeLimits() + if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok { + maxAttachLimit = int(maxAttachLimitFromAllocatable) } if numExistingVolumes+numNewVolumes > maxAttachLimit { @@ -555,7 +493,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, return true, nil, nil } -// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes +// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes. 
var EBSVolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AWSElasticBlockStore != nil { @@ -571,19 +509,19 @@ var EBSVolumeFilter = VolumeFilter{ return "", false }, - MatchProvisioner: func(sc *storagev1.StorageClass) (relevant bool) { + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { if sc.Provisioner == csilibplugins.AWSEBSInTreePluginName { return true } return false }, - IsMigrated: func(csiNode *storagev1beta1.CSINode) bool { + IsMigrated: func(csiNode *storage.CSINode) bool { return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName) }, } -// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes +// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes. var GCEPDVolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.GCEPersistentDisk != nil { @@ -599,19 +537,19 @@ var GCEPDVolumeFilter = VolumeFilter{ return "", false }, - MatchProvisioner: func(sc *storagev1.StorageClass) (relevant bool) { + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { if sc.Provisioner == csilibplugins.GCEPDInTreePluginName { return true } return false }, - IsMigrated: func(csiNode *storagev1beta1.CSINode) bool { + IsMigrated: func(csiNode *storage.CSINode) bool { return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName) }, } -// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes +// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes. var AzureDiskVolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AzureDisk != nil { @@ -627,19 +565,19 @@ var AzureDiskVolumeFilter = VolumeFilter{ return "", false }, - MatchProvisioner: func(sc *storagev1.StorageClass) (relevant bool) { + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { if sc.Provisioner == csilibplugins.AzureDiskInTreePluginName { return true } return false }, - IsMigrated: func(csiNode *storagev1beta1.CSINode) bool { + IsMigrated: func(csiNode *storage.CSINode) bool { return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName) }, } -// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes +// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes. // It will be deprecated once Openstack cloudprovider has been removed from in-tree. var CinderVolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { @@ -656,23 +594,23 @@ var CinderVolumeFilter = VolumeFilter{ return "", false }, - MatchProvisioner: func(sc *storagev1.StorageClass) (relevant bool) { + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { if sc.Provisioner == csilibplugins.CinderInTreePluginName { return true } return false }, - IsMigrated: func(csiNode *storagev1beta1.CSINode) bool { + IsMigrated: func(csiNode *storage.CSINode) bool { return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName) }, } // VolumeZoneChecker contains information to check the volume zone for a predicate. 
type VolumeZoneChecker struct { - pvInfo PersistentVolumeInfo - pvcInfo PersistentVolumeClaimInfo - classInfo StorageClassInfo + pvLister corelisters.PersistentVolumeLister + pvcLister corelisters.PersistentVolumeClaimLister + scLister storagelisters.StorageClassLister } // NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given @@ -689,16 +627,16 @@ type VolumeZoneChecker struct { // determining the zone of a volume during scheduling, and that is likely to // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. -func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) FitPredicate { +func NewVolumeZonePredicate(pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate { c := &VolumeZoneChecker{ - pvInfo: pvInfo, - pvcInfo: pvcInfo, - classInfo: classInfo, + pvLister: pvLister, + pvcLister: pvcLister, + scLister: scLister, } return c.predicate } -func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { @@ -734,7 +672,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI if pvcName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") } - pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) + pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) if err != nil { return false, nil, err } @@ -747,12 +685,12 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI if pvName == "" { scName := v1helper.GetPersistentVolumeClaimClass(pvc) if len(scName) > 0 { - class, _ := c.classInfo.GetStorageClassInfo(scName) + class, _ := c.scLister.Get(scName) if class != nil { if class.VolumeBindingMode == nil { return false, nil, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName) } - if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer { // Skip unbound volumes continue } @@ -761,7 +699,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) } - pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) + pv, err := c.pvLister.Get(pvName) if err != nil { return false, nil, err } @@ -845,7 +783,7 @@ func podName(pod *v1.Pod) string { // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the // predicate failure reasons if the node has insufficient resources to run the pod. 
-func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func PodFitsResources(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -861,10 +799,10 @@ func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulerno ignoredExtendedResources := sets.NewString() var podRequest *schedulernodeinfo.Resource - if predicateMeta, ok := meta.(*predicateMetadata); ok { - podRequest = predicateMeta.podRequest - if predicateMeta.ignoredExtendedResources != nil { - ignoredExtendedResources = predicateMeta.ignoredExtendedResources + if predicateMeta, ok := meta.(*predicateMetadata); ok && predicateMeta.podFitsResourcesMetadata != nil { + podRequest = predicateMeta.podFitsResourcesMetadata.podRequest + if predicateMeta.podFitsResourcesMetadata.ignoredExtendedResources != nil { + ignoredExtendedResources = predicateMeta.podFitsResourcesMetadata.ignoredExtendedResources } } else { // We couldn't parse metadata - fallback to computing it. @@ -970,7 +908,7 @@ func PodMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool { } // PodMatchNodeSelector checks if a pod node selector matches the node label. -func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func PodMatchNodeSelector(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -982,7 +920,7 @@ func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedul } // PodFitsHost checks if a pod spec node name matches the current node. -func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func PodFitsHost(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { if len(pod.Spec.NodeName) == 0 { return true, nil, nil } @@ -998,16 +936,18 @@ func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinf // NodeLabelChecker contains information to check node labels for a predicate. type NodeLabelChecker struct { - labels []string - presence bool + // presentLabels should be present for the node to be considered a fit for hosting the pod + presentLabels []string + // absentLabels should be absent for the node to be considered a fit for hosting the pod + absentLabels []string } // NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the // node labels which match a filter that it requests. 
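Reviewer note: PodFitsResources (and PodFitsHostPorts below) now read their inputs from per-predicate sub-structs of *predicateMetadata and fall back to recomputing when the type assertion fails or the sub-struct is nil. The shape of that fast-path/fallback, reduced to a self-contained example with invented types:

    package main

    import "fmt"

    // Illustrative stand-ins; the real code asserts meta to *predicateMetadata and
    // reads podFitsResourcesMetadata / podFitsHostPortsMetadata from it.
    type Metadata interface{}

    type resourceRequest struct{ milliCPU, memory int64 }

    type precomputedMetadata struct {
        podRequest *resourceRequest
    }

    func computeRequest() *resourceRequest {
        // Stands in for walking pod.Spec.Containers and summing their requests.
        return &resourceRequest{milliCPU: 500, memory: 256 << 20}
    }

    // requestFor prefers the precomputed value and falls back to recomputing when
    // the metadata is missing or of an unexpected concrete type.
    func requestFor(meta Metadata) *resourceRequest {
        if m, ok := meta.(*precomputedMetadata); ok && m.podRequest != nil {
            return m.podRequest
        }
        return computeRequest()
    }

    func main() {
        fmt.Println(*requestFor(&precomputedMetadata{podRequest: &resourceRequest{250, 128 << 20}}))
        fmt.Println(*requestFor(nil)) // no metadata: slow path recomputes
    }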
-func NewNodeLabelPredicate(labels []string, presence bool) FitPredicate { +func NewNodeLabelPredicate(presentLabels []string, absentLabels []string) FitPredicate { labelChecker := &NodeLabelChecker{ - labels: labels, - presence: presence, + presentLabels: presentLabels, + absentLabels: absentLabels, } return labelChecker.CheckNodeLabelPresence } @@ -1023,30 +963,36 @@ func NewNodeLabelPredicate(labels []string, presence bool) FitPredicate { // // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value -// and it may be desirable to avoid scheduling new pods on this node -func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// and it may be desirable to avoid scheduling new pods on this node. +func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } - var exists bool nodeLabels := labels.Set(node.Labels) - for _, label := range n.labels { - exists = nodeLabels.Has(label) - if (exists && !n.presence) || (!exists && n.presence) { - return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil + check := func(labels []string, presence bool) bool { + for _, label := range labels { + exists := nodeLabels.Has(label) + if (exists && !presence) || (!exists && presence) { + return false + } } + return true } - return true, nil, nil + if check(n.presentLabels, true) && check(n.absentLabels, false) { + return true, nil, nil + } + + return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil } // ServiceAffinity defines a struct used for creating service affinity predicates. type ServiceAffinity struct { - podLister algorithm.PodLister - serviceLister algorithm.ServiceLister - nodeInfo NodeInfo - labels []string + nodeInfoLister schedulerlisters.NodeInfoLister + podLister schedulerlisters.PodLister + serviceLister corelisters.ServiceLister + labels []string } // serviceAffinityMetadataProducer should be run once by the scheduler before looping through the Predicate. It is a helper function that @@ -1056,10 +1002,8 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.") return } - pm.serviceAffinityInUse = true - var err error // Store services which match the pod. - pm.serviceAffinityMatchingPodServices, err = s.serviceLister.GetPodServices(pm.pod) + matchingPodServices, err := s.serviceLister.GetPodServices(pm.pod) if err != nil { klog.Errorf("Error precomputing service affinity: could not list services: %v", err) } @@ -1070,22 +1014,26 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) } // consider only the pods that belong to the same namespace - pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace) + matchingPodList := FilterPodsByNamespace(allMatches, pm.pod.Namespace) + pm.serviceAffinityMetadata = &serviceAffinityMetadata{ + matchingPodList: matchingPodList, + matchingPodServices: matchingPodServices, + } } // NewServiceAffinityPredicate creates a ServiceAffinity. 
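Reviewer note: the rewritten CheckNodeLabelPresence folds the old single labels/presence pair into two lists checked by one closure. The same logic in isolation (plain map instead of labels.Set; names invented):

    package main

    import "fmt"

    // checkLabelPresence mirrors the new CheckNodeLabelPresence helper: every key
    // in wantPresent must exist on the node and every key in wantAbsent must not.
    func checkLabelPresence(nodeLabels map[string]string, wantPresent, wantAbsent []string) bool {
        check := func(keys []string, presence bool) bool {
            for _, k := range keys {
                _, exists := nodeLabels[k]
                if exists != presence {
                    return false
                }
            }
            return true
        }
        return check(wantPresent, true) && check(wantAbsent, false)
    }

    func main() {
        labels := map[string]string{"zone": "us-east-1a", "retiring": "2019-12-01"}
        fmt.Println(checkLabelPresence(labels, []string{"zone"}, nil))                   // true
        fmt.Println(checkLabelPresence(labels, []string{"zone"}, []string{"retiring"})) // false: "retiring" must be absent
    }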
-func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (FitPredicate, predicateMetadataProducer) { +func NewServiceAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister, podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (FitPredicate, predicateMetadataProducer) { affinity := &ServiceAffinity{ - podLister: podLister, - serviceLister: serviceLister, - nodeInfo: nodeInfo, - labels: labels, + nodeInfoLister: nodeInfoLister, + podLister: podLister, + serviceLister: serviceLister, + labels: labels, } return affinity.checkServiceAffinity, affinity.serviceAffinityMetadataProducer } // checkServiceAffinity is a predicate which matches nodes in such a way to force that -// ServiceAffinity.labels are homogenous for pods that are scheduled to a node. +// ServiceAffinity.labels are homogeneous for pods that are scheduled to a node. // (i.e. it returns true IFF this pod can be added to this node such that all other pods in // the same service are running on nodes with the exact same ServiceAffinity.label values). // @@ -1111,17 +1059,17 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al // // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed... // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction. -func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { var services []*v1.Service var pods []*v1.Pod - if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) { - services = pm.serviceAffinityMatchingPodServices - pods = pm.serviceAffinityMatchingPodList + if pm, ok := meta.(*predicateMetadata); ok && pm.serviceAffinityMetadata != nil && (pm.serviceAffinityMetadata.matchingPodList != nil || pm.serviceAffinityMetadata.matchingPodServices != nil) { + services = pm.serviceAffinityMetadata.matchingPodServices + pods = pm.serviceAffinityMetadata.matchingPodList } else { // Make the predicate resilient in case metadata is missing. 
pm = &predicateMetadata{pod: pod} s.serviceAffinityMetadataProducer(pm) - pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices + pods, services = pm.serviceAffinityMetadata.matchingPodList, pm.serviceAffinityMetadata.matchingPodServices } filteredPods := nodeInfo.FilterOutPods(pods) node := nodeInfo.Node() @@ -1134,11 +1082,11 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada if len(s.labels) > len(affinityLabels) { if len(services) > 0 { if len(filteredPods) > 0 { - nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(filteredPods[0].Spec.NodeName) + nodeWithAffinityLabels, err := s.nodeInfoLister.Get(filteredPods[0].Spec.NodeName) if err != nil { return false, nil, err } - AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels)) + AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Node().Labels)) } } } @@ -1150,10 +1098,10 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada } // PodFitsHostPorts checks if a node has free ports for the requested pod ports. -func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func PodFitsHostPorts(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { var wantPorts []*v1.ContainerPort - if predicateMeta, ok := meta.(*predicateMetadata); ok { - wantPorts = predicateMeta.podPorts + if predicateMeta, ok := meta.(*predicateMetadata); ok && predicateMeta.podFitsHostPortsMetadata != nil { + wantPorts = predicateMeta.podFitsHostPortsMetadata.podPorts } else { // We couldn't parse metadata - fallback to computing it. wantPorts = schedutil.GetContainerPorts(pod) @@ -1172,7 +1120,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulerno return true, nil, nil } -// search two arrays and return true if they have at least one common element; return false otherwise +// haveOverlap searches two arrays and returns true if they have at least one common element; returns false otherwise. func haveOverlap(a1, a2 []string) bool { if len(a1) > len(a2) { a1, a2 = a2, a1 @@ -1192,8 +1140,8 @@ func haveOverlap(a1, a2 []string) bool { } // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates -// that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need -func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need. 
+func GeneralPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { var predicateFails []PredicateFailureReason for _, predicate := range []FitPredicate{noncriticalPredicates, EssentialPredicates} { fit, reasons, err := predicate(pod, meta, nodeInfo) @@ -1208,8 +1156,8 @@ func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulern return len(predicateFails) == 0, predicateFails, nil } -// noncriticalPredicates are the predicates that only non-critical pods need -func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// noncriticalPredicates are the predicates that only non-critical pods need. +func noncriticalPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { var predicateFails []PredicateFailureReason fit, reasons, err := PodFitsResources(pod, meta, nodeInfo) if err != nil { @@ -1222,8 +1170,8 @@ func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedu return len(predicateFails) == 0, predicateFails, nil } -// EssentialPredicates are the predicates that all pods, including critical pods, need -func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// EssentialPredicates are the predicates that all pods, including critical pods, need. +func EssentialPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { var predicateFails []PredicateFailureReason // TODO: PodFitsHostPorts is essential for now, but kubelet should ideally // preempt pods to free up host ports too @@ -1242,15 +1190,15 @@ func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedule // PodAffinityChecker contains information to check pod affinity. type PodAffinityChecker struct { - info NodeInfo - podLister algorithm.PodLister + nodeInfoLister schedulerlisters.NodeInfoLister + podLister schedulerlisters.PodLister } // NewPodAffinityPredicate creates a PodAffinityChecker. -func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPredicate { +func NewPodAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister, podLister schedulerlisters.PodLister) FitPredicate { checker := &PodAffinityChecker{ - info: info, - podLister: podLister, + nodeInfoLister: nodeInfoLister, + podLister: podLister, } return checker.InterPodAffinityMatches } @@ -1258,7 +1206,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPr // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the // predicate failure reasons if the pod cannot be scheduled on the specified node. 
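Reviewer note: GeneralPredicates composes noncriticalPredicates and EssentialPredicates by running each one, bailing out on a hard error, and otherwise accumulating every failure reason so the caller sees the full list of unmet predicates. A reduced sketch of that composition with toy predicate types (nothing below comes from the patch itself):

    package main

    import "fmt"

    type failureReason string

    type fitPredicate func() (bool, []failureReason, error)

    // runAll matches the shape of GeneralPredicates: hard errors abort, soft
    // failures are collected across all predicates.
    func runAll(preds []fitPredicate) (bool, []failureReason, error) {
        var fails []failureReason
        for _, p := range preds {
            fit, reasons, err := p()
            if err != nil {
                return false, fails, err
            }
            if !fit {
                fails = append(fails, reasons...)
            }
        }
        return len(fails) == 0, fails, nil
    }

    func main() {
        enoughCPU := func() (bool, []failureReason, error) { return false, []failureReason{"Insufficient cpu"}, nil }
        portsFree := func() (bool, []failureReason, error) { return true, nil, nil }
        fit, reasons, err := runAll([]fitPredicate{enoughCPU, portsFree})
        fmt.Println(fit, reasons, err)
    }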
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -1304,7 +1252,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, return false, false, nil } // Namespace and selector of the terms have matched. Now we check topology of the terms. - targetPodNode, err := c.info.GetNodeInfo(targetPod.Spec.NodeName) + targetPodNodeInfo, err := c.nodeInfoLister.Get(targetPod.Spec.NodeName) if err != nil { return false, false, err } @@ -1312,7 +1260,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, if len(term.TopologyKey) == 0 { return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") } - if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNode, term.TopologyKey) { + if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo.Node(), term.TopologyKey) { return false, true, nil } } @@ -1377,16 +1325,12 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1. topologyMaps := newTopologyPairsMaps() for _, existingPod := range existingPods { - existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) + existingPodNodeInfo, err := c.nodeInfoLister.Get(existingPod.Spec.NodeName) if err != nil { - if apierrors.IsNotFound(err) { - klog.Errorf("Pod %s has NodeName %q but node is not found", - podName(existingPod), existingPod.Spec.NodeName) - continue - } - return nil, err + klog.Errorf("Pod %s has NodeName %q but node is not found", podName(existingPod), existingPod.Spec.NodeName) + continue } - existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode) + existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNodeInfo.Node()) if err != nil { return nil, err } @@ -1397,14 +1341,14 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1. // Checks if scheduling the pod onto this node would break any anti-affinity // terms indicated by the existing pods. -func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) { +func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { - return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil") + return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("node not found") } var topologyMaps *topologyPairsMaps if predicateMeta, ok := meta.(*predicateMetadata); ok { - topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap + topologyMaps = predicateMeta.podAffinityMetadata.topologyPairsAntiAffinityPodsMap } else { // Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not // present in nodeInfo. Pods on other nodes pass the filter. 
@@ -1470,17 +1414,17 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai return false } -// Checks if scheduling the pod onto this node would break any term of this pod. +// satisfiesPodsAffinityAntiAffinity checks if scheduling the pod onto this node would break any term of this pod. func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, - meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo, + meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo, affinity *v1.Affinity) (PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { - return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil") + return ErrPodAffinityRulesNotMatch, fmt.Errorf("node not found") } if predicateMeta, ok := meta.(*predicateMetadata); ok { // Check all affinity terms. - topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods + topologyPairsPotentialAffinityPods := predicateMeta.podAffinityMetadata.topologyPairsPotentialAffinityPods if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 { matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms) if !matchExists { @@ -1497,7 +1441,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, } // Check all anti-affinity terms. - topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods + topologyPairsPotentialAntiAffinityPods := predicateMeta.podAffinityMetadata.topologyPairsPotentialAntiAffinityPods if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 { matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms) if matchExists { @@ -1573,14 +1517,14 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, } // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec. -func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil } // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`. podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{ - Key: schedulerapi.TaintNodeUnschedulable, + Key: v1.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule, }) @@ -1592,8 +1536,8 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeIn return true, nil, nil } -// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints -func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints. 
+func PodToleratesNodeTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil } @@ -1604,8 +1548,8 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *sched }) } -// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints -func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints. +func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoExecute }) @@ -1623,80 +1567,6 @@ func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, f return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil } -// isPodBestEffort checks if pod is scheduled with best-effort QoS -func isPodBestEffort(pod *v1.Pod) bool { - return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort -} - -// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node -// reporting memory pressure condition. -func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { - var podBestEffort bool - if predicateMeta, ok := meta.(*predicateMetadata); ok { - podBestEffort = predicateMeta.podBestEffort - } else { - // We couldn't parse metadata - fallback to computing it. - podBestEffort = isPodBestEffort(pod) - } - // pod is not BestEffort pod - if !podBestEffort { - return true, nil, nil - } - - // check if node is under memory pressure - if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue { - return false, []PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil - } - return true, nil, nil -} - -// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node -// reporting disk pressure condition. -func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { - // check if node is under disk pressure - if nodeInfo.DiskPressureCondition() == v1.ConditionTrue { - return false, []PredicateFailureReason{ErrNodeUnderDiskPressure}, nil - } - return true, nil, nil -} - -// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node -// reporting pid pressure condition. -func CheckNodePIDPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { - // check if node is under pid pressure - if nodeInfo.PIDPressureCondition() == v1.ConditionTrue { - return false, []PredicateFailureReason{ErrNodeUnderPIDPressure}, nil - } - return true, nil, nil -} - -// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting -// network unavailable and not ready condition. Only node conditions are accounted in this predicate. 
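Reviewer note: with the CheckNodeCondition and node-pressure predicates removed, node health is expected to surface as taints (for example v1.TaintNodeUnschedulable, i.e. node.kubernetes.io/unschedulable) and to be filtered by PodToleratesNodeTaints / PodToleratesNodeNoExecuteTaints. A simplified, self-contained illustration of the filter-then-tolerate check; the real operator semantics (Exists/Equal, empty keys) live in v1helper and are intentionally reduced here:

    package main

    import "fmt"

    type taint struct{ key, value, effect string }
    type toleration struct{ key, value, effect string }

    // tolerates is a reduced "Equal"-style match: key and value must be equal and
    // the toleration's effect must be empty or equal to the taint's effect.
    func tolerates(tol toleration, t taint) bool {
        if tol.effect != "" && tol.effect != t.effect {
            return false
        }
        return tol.key == t.key && tol.value == t.value
    }

    // podToleratesTaints mirrors podToleratesNodeTaints: only taints accepted by
    // the filter matter, and each of them must be tolerated by some toleration.
    func podToleratesTaints(tols []toleration, taints []taint, filter func(taint) bool) bool {
        for _, t := range taints {
            if !filter(t) {
                continue
            }
            tolerated := false
            for _, tol := range tols {
                if tolerates(tol, t) {
                    tolerated = true
                    break
                }
            }
            if !tolerated {
                return false
            }
        }
        return true
    }

    func main() {
        taints := []taint{{"node.kubernetes.io/unschedulable", "", "NoSchedule"}}
        tols := []toleration{{"node.kubernetes.io/unschedulable", "", "NoSchedule"}}
        noSchedule := func(t taint) bool { return t.effect == "NoSchedule" || t.effect == "NoExecute" }
        fmt.Println(podToleratesTaints(tols, taints, noSchedule)) // true
    }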
-func CheckNodeConditionPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { - reasons := []PredicateFailureReason{} - if nodeInfo == nil || nodeInfo.Node() == nil { - return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil - } - - node := nodeInfo.Node() - for _, cond := range node.Status.Conditions { - // We consider the node for scheduling only when its: - // - NodeReady condition status is ConditionTrue, - // - NodeNetworkUnavailable condition status is ConditionFalse. - if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue { - reasons = append(reasons, ErrNodeNotReady) - } else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse { - reasons = append(reasons, ErrNodeNetworkUnavailable) - } - } - - if node.Spec.Unschedulable { - reasons = append(reasons, ErrNodeUnschedulable) - } - - return len(reasons) == 0, reasons, nil -} - // VolumeBindingChecker contains information to check a volume binding. type VolumeBindingChecker struct { binder *volumebinder.VolumeBinder @@ -1729,7 +1599,7 @@ func podHasPVCs(pod *v1.Pod) bool { return false } -func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { // If pod does not request any PVC, we don't need to do anything. if !podHasPVCs(pod) { return true, nil, nil @@ -1767,60 +1637,52 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, no // EvenPodsSpreadPredicate checks if a pod can be scheduled on a node which satisfies // its topologySpreadConstraints. -func EvenPodsSpreadPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { +func EvenPodsSpreadPredicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } - constraints := getHardTopologySpreadConstraints(pod) - if len(constraints) == 0 { - return true, nil, nil - } - var podSpreadCache *podSpreadCache + var epsMeta *evenPodsSpreadMetadata if predicateMeta, ok := meta.(*predicateMetadata); ok { - podSpreadCache = predicateMeta.podSpreadCache + epsMeta = predicateMeta.evenPodsSpreadMetadata } else { // We don't have precomputed metadata. We have to follow a slow path to check spread constraints. 
// TODO(autoscaler): get it implemented return false, nil, errors.New("metadata not pre-computed for EvenPodsSpreadPredicate") } - if podSpreadCache == nil || len(podSpreadCache.tpPairToMatchNum) == 0 { + if epsMeta == nil || len(epsMeta.tpPairToMatchNum) == 0 || len(epsMeta.constraints) == 0 { return true, nil, nil } podLabelSet := labels.Set(pod.Labels) - for _, constraint := range constraints { - tpKey := constraint.TopologyKey - tpVal, ok := node.Labels[constraint.TopologyKey] + for _, c := range epsMeta.constraints { + tpKey := c.topologyKey + tpVal, ok := node.Labels[c.topologyKey] if !ok { klog.V(5).Infof("node '%s' doesn't have required label '%s'", node.Name, tpKey) return false, []PredicateFailureReason{ErrTopologySpreadConstraintsNotMatch}, nil } - selfMatch, err := PodMatchesSpreadConstraint(podLabelSet, constraint) - if err != nil { - return false, nil, err - } selfMatchNum := int32(0) - if selfMatch { + if c.selector.Matches(podLabelSet) { selfMatchNum = 1 } pair := topologyPair{key: tpKey, value: tpVal} - paths, ok := podSpreadCache.tpKeyToCriticalPaths[tpKey] + paths, ok := epsMeta.tpKeyToCriticalPaths[tpKey] if !ok { // error which should not happen - klog.Errorf("internal error: get paths from key %q of %#v", tpKey, podSpreadCache.tpKeyToCriticalPaths) + klog.Errorf("internal error: get paths from key %q of %#v", tpKey, epsMeta.tpKeyToCriticalPaths) continue } // judging criteria: // 'existing matching num' + 'if self-match (1 or 0)' - 'global min matching num' <= 'maxSkew' minMatchNum := paths[0].matchNum - matchNum := podSpreadCache.tpPairToMatchNum[pair] + matchNum := epsMeta.tpPairToMatchNum[pair] skew := matchNum + selfMatchNum - minMatchNum - if skew > constraint.MaxSkew { - klog.V(5).Infof("node '%s' failed spreadConstraint[%s]: matchNum(%d) + selfMatchNum(%d) - minMatchNum(%d) > maxSkew(%d)", node.Name, tpKey, matchNum, selfMatchNum, minMatchNum, constraint.MaxSkew) + if skew > c.maxSkew { + klog.V(5).Infof("node '%s' failed spreadConstraint[%s]: matchNum(%d) + selfMatchNum(%d) - minMatchNum(%d) > maxSkew(%d)", node.Name, tpKey, matchNum, selfMatchNum, minMatchNum, c.maxSkew) return false, []PredicateFailureReason{ErrTopologySpreadConstraintsNotMatch}, nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/testing_helper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/testing_helper.go deleted file mode 100644 index e40fa5a355c..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/testing_helper.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package predicates - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" -) - -// FakePersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing. 
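Reviewer note: the skew test above is the core of EvenPodsSpreadPredicate: for every hard constraint, the existing matches on the candidate node's topology value, plus one if the incoming pod matches its own selector, minus the global minimum, must not exceed maxSkew. A self-contained numeric sketch of that criterion (selectors are omitted and the pod is assumed to match itself; all names invented):

    package main

    import "fmt"

    type topologyPair struct{ key, value string }

    // nodeSatisfiesSpread applies the judging criteria:
    // matchNum + selfMatchNum - minMatchNum <= maxSkew for every topology key.
    func nodeSatisfiesSpread(
        nodeLabels map[string]string,
        constraints map[string]int32, // topologyKey -> maxSkew
        matchNum map[topologyPair]int32, // existing matching pods per (key, value)
        minMatchNum map[string]int32, // global minimum matching count per topology key
    ) bool {
        const selfMatchNum = int32(1)
        for tpKey, maxSkew := range constraints {
            tpVal, ok := nodeLabels[tpKey]
            if !ok {
                return false // node lacks the required topology label
            }
            skew := matchNum[topologyPair{tpKey, tpVal}] + selfMatchNum - minMatchNum[tpKey]
            if skew > maxSkew {
                return false
            }
        }
        return true
    }

    func main() {
        ok := nodeSatisfiesSpread(
            map[string]string{"zone": "zone-a"},
            map[string]int32{"zone": 1},
            map[topologyPair]int32{{"zone", "zone-a"}: 3, {"zone", "zone-b"}: 1},
            map[string]int32{"zone": 1},
        )
        fmt.Println(ok) // false: 3 + 1 - 1 = 3 exceeds maxSkew 1
    }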
-type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim - -// GetPersistentVolumeClaimInfo gets PVC matching the namespace and PVC ID. -func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) { - for _, pvc := range pvcs { - if pvc.Name == pvcID && pvc.Namespace == namespace { - return &pvc, nil - } - } - return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID) -} - -// FakeNodeInfo declares a v1.Node type for testing. -type FakeNodeInfo v1.Node - -// GetNodeInfo return a fake node info object. -func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { - node := v1.Node(n) - return &node, nil -} - -// FakeNodeListInfo declares a []v1.Node type for testing. -type FakeNodeListInfo []v1.Node - -// GetNodeInfo returns a fake node object in the fake nodes. -func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { - for _, node := range nodes { - if node.Name == nodeName { - return &node, nil - } - } - return nil, fmt.Errorf("Unable to find node: %s", nodeName) -} - -// FakeCSINodeInfo declares a storagev1beta1.CSINode type for testing. -type FakeCSINodeInfo storagev1beta1.CSINode - -// GetCSINodeInfo returns a fake CSINode object. -func (n FakeCSINodeInfo) GetCSINodeInfo(name string) (*storagev1beta1.CSINode, error) { - csiNode := storagev1beta1.CSINode(n) - return &csiNode, nil -} - -// FakePersistentVolumeInfo declares a []v1.PersistentVolume type for testing. -type FakePersistentVolumeInfo []v1.PersistentVolume - -// GetPersistentVolumeInfo returns a fake PV object in the fake PVs by PV ID. -func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { - for _, pv := range pvs { - if pv.Name == pvID { - return &pv, nil - } - } - return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID) -} - -// FakeStorageClassInfo declares a []storagev1.StorageClass type for testing. -type FakeStorageClassInfo []storagev1.StorageClass - -// GetStorageClassInfo returns a fake storage class object in the fake storage classes by name. -func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) { - for _, sc := range classes { - if sc.Name == name { - return &sc, nil - } - } - return nil, fmt.Errorf("Unable to find storage class: %s", name) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go index 3115b4e8391..d74a4a106d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go @@ -19,8 +19,8 @@ package predicates import ( "strings" - "k8s.io/api/core/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -67,7 +67,7 @@ func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod { // CreateSelectorFromLabels is used to define a selector that corresponds to the keys in a map. 
func CreateSelectorFromLabels(aL map[string]string) labels.Selector { - if aL == nil || len(aL) == 0 { + if len(aL) == 0 { return labels.Everything() } return labels.Set(aL).AsSelector() @@ -97,7 +97,7 @@ func SetPredicatesOrderingDuringTest(value []string) func() { // isCSIMigrationOn returns a boolean value indicating whether // the CSI migration has been enabled for a particular storage plugin. -func isCSIMigrationOn(csiNode *storagev1beta1.CSINode, pluginName string) bool { +func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool { if csiNode == nil || len(pluginName) == 0 { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD index f57986635c4..dd610b299bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD @@ -33,20 +33,21 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -63,11 +64,11 @@ go_test( "metadata_test.go", "most_requested_test.go", "node_affinity_test.go", - "node_label_test.go", "node_prefer_avoid_pods_test.go", "requested_to_capacity_ratio_test.go", "resource_limits_test.go", "selector_spreading_test.go", + "spreading_perf_test.go", "taint_toleration_test.go", "types_test.go", ], @@ -76,8 +77,10 @@ go_test( "//pkg/features:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", @@ -86,7 +89,10 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index 37f21da2f7f..ef91169dda4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) var ( @@ -60,7 +60,7 @@ func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolu // Since the variance is between positive fractions, it will be positive fraction. 1-variance lets the // score to be higher for node which has least variance and multiplying it with 10 provides the scaling // factor needed. - return int64((1 - variance) * float64(schedulerapi.MaxPriority)) + return int64((1 - variance) * float64(framework.MaxNodeScore)) } // Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1 @@ -68,7 +68,7 @@ func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolu // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced. diff := math.Abs(cpuFraction - memoryFraction) - return int64((1 - diff) * float64(schedulerapi.MaxPriority)) + return int64((1 - diff) * float64(framework.MaxNodeScore)) } func fractionOfCapacity(requested, capacity int64) float64 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/even_pods_spread.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/even_pods_spread.go index 37cc073a158..5ef8e6d4bfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/even_pods_spread.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/even_pods_spread.go @@ -18,15 +18,18 @@ package priorities import ( "context" + "fmt" "math" "sync/atomic" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/klog" ) @@ -36,72 +39,49 @@ type topologyPair struct { value string } -type topologySpreadConstraintsMap struct { - // nodeNameToPodCounts is keyed with node name, and valued with the number of matching pods. 
- nodeNameToPodCounts map[string]int32 +type podTopologySpreadMap struct { + constraints []topologySpreadConstraint + // nodeNameSet is a string set holding all node names which have all constraints[*].topologyKey present. + nodeNameSet map[string]struct{} // topologyPairToPodCounts is keyed with topologyPair, and valued with the number of matching pods. - topologyPairToPodCounts map[topologyPair]*int32 + topologyPairToPodCounts map[topologyPair]*int64 } -func newTopologySpreadConstraintsMap() *topologySpreadConstraintsMap { - return &topologySpreadConstraintsMap{ - nodeNameToPodCounts: make(map[string]int32), - topologyPairToPodCounts: make(map[topologyPair]*int32), - } +// topologySpreadConstraint is an internal version for a soft (ScheduleAnyway +// unsatisfiable constraint action) v1.TopologySpreadConstraint and where the +// selector is parsed. +type topologySpreadConstraint struct { + topologyKey string + selector labels.Selector } -// Note: the passed in are the "filtered" nodes which have passed Predicates. -// This function iterates to filter out the nodes which don't have required topologyKey(s), -// and initialize two maps: -// 1) t.topologyPairToPodCounts: keyed with both eligible topology pair and node names. -// 2) t.nodeNameToPodCounts: keyed with node name, and valued with a *int32 pointer for eligible node only. -func (t *topologySpreadConstraintsMap) initialize(pod *v1.Pod, nodes []*v1.Node) { - constraints := getSoftTopologySpreadConstraints(pod) - for _, node := range nodes { - if !predicates.NodeLabelsMatchSpreadConstraints(node.Labels, constraints) { - continue - } - for _, constraint := range constraints { - pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]} - if t.topologyPairToPodCounts[pair] == nil { - t.topologyPairToPodCounts[pair] = new(int32) - } - } - t.nodeNameToPodCounts[node.Name] = 0 - // For those nodes which don't have all required topologyKeys present, it's intentional to keep - // those entries absent in nodeNameToPodCounts, so that we're able to score them to 0 afterwards. +func newTopologySpreadConstraintsMap() *podTopologySpreadMap { + return &podTopologySpreadMap{ + nodeNameSet: make(map[string]struct{}), + topologyPairToPodCounts: make(map[topologyPair]*int64), } } -// CalculateEvenPodsSpreadPriority computes a score by checking through the topologySpreadConstraints -// that are with WhenUnsatisfiable=ScheduleAnyway (a.k.a soft constraint). -// The function works as below: -// 1) In all nodes, calculate the number of pods which match 's soft topology spread constraints. -// 2) Group the number calculated in 1) by topologyPair, and sum up to corresponding candidate nodes. -// 3) Finally normalize the number to 0~10. The node with the highest score is the most preferred. -// Note: Symmetry is not applicable. We only weigh how incomingPod matches existingPod. -// Whether existingPod matches incomingPod doesn't contribute to the final score. -// This is different from the Affinity API. -func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { - result := make(schedulerapi.HostPriorityList, len(nodes)) - // return if incoming pod doesn't have soft topology spread constraints. - constraints := getSoftTopologySpreadConstraints(pod) - if len(constraints) == 0 { - return result, nil +// buildPodTopologySpreadMap prepares necessary data (podTopologySpreadMap) for incoming pod on the filteredNodes. 
+// Later Priority function will use 'podTopologySpreadMap' to perform the Scoring calculations. +func buildPodTopologySpreadMap(pod *v1.Pod, filteredNodes []*v1.Node, allNodes []*schedulernodeinfo.NodeInfo) (*podTopologySpreadMap, error) { + if len(filteredNodes) == 0 || len(allNodes) == 0 { + return nil, nil } - t := newTopologySpreadConstraintsMap() - t.initialize(pod, nodes) - - allNodeNames := make([]string, 0, len(nodeNameToInfo)) - for name := range nodeNameToInfo { - allNodeNames = append(allNodeNames, name) + // initialize podTopologySpreadMap which will be used in Score plugin. + m := newTopologySpreadConstraintsMap() + err := m.initialize(pod, filteredNodes) + if err != nil { + return nil, err + } + // return if incoming pod doesn't have soft topology spread constraints. + if m.constraints == nil { + return nil, nil } - errCh := schedutil.NewErrorChannel() - ctx, cancel := context.WithCancel(context.Background()) processAllNode := func(i int) { - nodeInfo := nodeNameToInfo[allNodeNames[i]] + nodeInfo := allNodes[i] node := nodeInfo.Node() if node == nil { return @@ -109,101 +89,179 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch // (1) `node` should satisfy incoming pod's NodeSelector/NodeAffinity // (2) All topologyKeys need to be present in `node` if !predicates.PodMatchesNodeSelectorAndAffinityTerms(pod, node) || - !predicates.NodeLabelsMatchSpreadConstraints(node.Labels, constraints) { + !nodeLabelsMatchSpreadConstraints(node.Labels, m.constraints) { return } - for _, constraint := range constraints { - pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]} + for _, c := range m.constraints { + pair := topologyPair{key: c.topologyKey, value: node.Labels[c.topologyKey]} // If current topology pair is not associated with any candidate node, // continue to avoid unnecessary calculation. - if t.topologyPairToPodCounts[pair] == nil { + if m.topologyPairToPodCounts[pair] == nil { continue } // indicates how many pods (on current node) match the . - matchSum := int32(0) + matchSum := int64(0) for _, existingPod := range nodeInfo.Pods() { - match, err := predicates.PodMatchesSpreadConstraint(existingPod.Labels, constraint) - if err != nil { - errCh.SendErrorWithCancel(err, cancel) - return - } - if match { + if c.selector.Matches(labels.Set(existingPod.Labels)) { matchSum++ } } - atomic.AddInt32(t.topologyPairToPodCounts[pair], matchSum) + atomic.AddInt64(m.topologyPairToPodCounts[pair], matchSum) } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processAllNode) - if err := errCh.ReceiveError(); err != nil { - return nil, err - } + workqueue.ParallelizeUntil(context.Background(), 16, len(allNodes), processAllNode) + + return m, nil +} - var minCount int32 = math.MaxInt32 - // sums up the number of matching pods on each qualified topology pair - var total int32 - for _, node := range nodes { - if _, ok := t.nodeNameToPodCounts[node.Name]; !ok { +// initialize iterates "filteredNodes" to filter out the nodes which don't have required topologyKey(s), +// and initialize two maps: +// 1) m.topologyPairToPodCounts: keyed with both eligible topology pair and node names. +// 2) m.nodeNameSet: keyed with node name, and valued with a *int64 pointer for eligible node only. 
+func (m *podTopologySpreadMap) initialize(pod *v1.Pod, filteredNodes []*v1.Node) error { + constraints, err := filterSoftTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints) + if err != nil { + return err + } + if constraints == nil { + return nil + } + m.constraints = constraints + for _, node := range filteredNodes { + if !nodeLabelsMatchSpreadConstraints(node.Labels, m.constraints) { continue } - - // For each present , current node gets a credit of . - // And we add to to reverse the final score later. - for _, constraint := range constraints { - if tpVal, ok := node.Labels[constraint.TopologyKey]; ok { - pair := topologyPair{key: constraint.TopologyKey, value: tpVal} - matchSum := *t.topologyPairToPodCounts[pair] - t.nodeNameToPodCounts[node.Name] += matchSum - total += matchSum + for _, constraint := range m.constraints { + pair := topologyPair{key: constraint.topologyKey, value: node.Labels[constraint.topologyKey]} + if m.topologyPairToPodCounts[pair] == nil { + m.topologyPairToPodCounts[pair] = new(int64) } } - if t.nodeNameToPodCounts[node.Name] < minCount { - minCount = t.nodeNameToPodCounts[node.Name] + m.nodeNameSet[node.Name] = struct{}{} + // For those nodes which don't have all required topologyKeys present, it's intentional to leave + // their entries absent in nodeNameSet, so that we're able to score them to 0 afterwards. + } + return nil +} + +// CalculateEvenPodsSpreadPriorityMap calculate the number of matching pods on the passed-in "node", +// and return the number as Score. +func CalculateEvenPodsSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { + node := nodeInfo.Node() + if node == nil { + return framework.NodeScore{}, fmt.Errorf("node not found") + } + + var m *podTopologySpreadMap + if priorityMeta, ok := meta.(*priorityMetadata); ok { + m = priorityMeta.podTopologySpreadMap + } + if m == nil { + return framework.NodeScore{Name: node.Name, Score: 0}, nil + } + + // no need to continue if the node is not qualified. + if _, ok := m.nodeNameSet[node.Name]; !ok { + return framework.NodeScore{Name: node.Name, Score: 0}, nil + } + + // For each present , current node gets a credit of . + // And we sum up and return it as this node's score. + var score int64 + for _, c := range m.constraints { + if tpVal, ok := node.Labels[c.topologyKey]; ok { + pair := topologyPair{key: c.topologyKey, value: tpVal} + matchSum := *m.topologyPairToPodCounts[pair] + score += matchSum } } + return framework.NodeScore{Name: node.Name, Score: score}, nil +} - // calculate final priority score for each node - // TODO(Huang-Wei): in alpha version, we keep the formula as simple as possible. - // current version ranks the nodes properly, but it doesn't take MaxSkew into - // consideration, we may come up with a better formula in the future. - maxMinDiff := total - minCount - for i := range nodes { - node := nodes[i] - result[i].Host = node.Name +// CalculateEvenPodsSpreadPriorityReduce normalizes the score for each filteredNode, +// The basic rule is: the bigger the score(matching number of pods) is, the smaller the +// final normalized score will be. 
+func CalculateEvenPodsSpreadPriorityReduce(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, + result framework.NodeScoreList) error { + var m *podTopologySpreadMap + if priorityMeta, ok := meta.(*priorityMetadata); ok { + m = priorityMeta.podTopologySpreadMap + } + if m == nil { + return nil + } - // debugging purpose: print the value for each node - // score must be pointer here, otherwise it's always 0 + // Calculate the summed score and . + var minScore int64 = math.MaxInt64 + var total int64 + for _, score := range result { + // it's mandatory to check if is present in m.nodeNameSet + if _, ok := m.nodeNameSet[score.Name]; !ok { + continue + } + total += score.Score + if score.Score < minScore { + minScore = score.Score + } + } + + maxMinDiff := total - minScore + for i := range result { + nodeInfo, err := sharedLister.NodeInfos().Get(result[i].Name) + if err != nil { + return err + } + node := nodeInfo.Node() + // Debugging purpose: print the score for each node. + // Score must be a pointer here, otherwise it's always 0. if klog.V(10) { - defer func(score *int, nodeName string) { - klog.Infof("%v -> %v: EvenPodsSpreadPriority, Score: (%d)", pod.Name, nodeName, *score) + defer func(score *int64, nodeName string) { + klog.Infof("%v -> %v: PodTopologySpread NormalizeScore, Score: (%d)", pod.Name, nodeName, *score) }(&result[i].Score, node.Name) } - if _, ok := t.nodeNameToPodCounts[node.Name]; !ok { - result[i].Score = 0 + if maxMinDiff == 0 { + result[i].Score = framework.MaxNodeScore continue } - if maxMinDiff == 0 { - result[i].Score = schedulerapi.MaxPriority + + if _, ok := m.nodeNameSet[node.Name]; !ok { + result[i].Score = 0 continue } - fScore := float64(schedulerapi.MaxPriority) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff)) - result[i].Score = int(fScore) - } - return result, nil + flippedScore := total - result[i].Score + fScore := float64(framework.MaxNodeScore) * (float64(flippedScore) / float64(maxMinDiff)) + result[i].Score = int64(fScore) + } + return nil } -// TODO(Huang-Wei): combine this with getHardTopologySpreadConstraints() in predicates package -func getSoftTopologySpreadConstraints(pod *v1.Pod) (constraints []v1.TopologySpreadConstraint) { - if pod != nil { - for _, constraint := range pod.Spec.TopologySpreadConstraints { - if constraint.WhenUnsatisfiable == v1.ScheduleAnyway { - constraints = append(constraints, constraint) +func filterSoftTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint) ([]topologySpreadConstraint, error) { + var r []topologySpreadConstraint + for _, c := range constraints { + if c.WhenUnsatisfiable == v1.ScheduleAnyway { + selector, err := metav1.LabelSelectorAsSelector(c.LabelSelector) + if err != nil { + return nil, err } + r = append(r, topologySpreadConstraint{ + topologyKey: c.TopologyKey, + selector: selector, + }) + } + } + return r, nil +} + +// nodeLabelsMatchSpreadConstraints checks if ALL topology keys in spread constraints are present in node labels. 
+func nodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []topologySpreadConstraint) bool { + for _, c := range constraints { + if _, ok := nodeLabels[c.topologyKey]; !ok { + return false } } - return + return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go index cc1db725ad9..0789d442142 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + v1 "k8s.io/api/core/v1" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -39,10 +39,10 @@ const ( // based on the total size of those images. // - If none of the images are present, this node will be given the lowest priority. // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority. -func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } var score int @@ -53,9 +53,9 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler score = 0 } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: score, + return framework.NodeScore{ + Name: node.Name, + Score: int64(score), }, nil } @@ -68,7 +68,7 @@ func calculatePriority(sumScores int64) int { sumScores = maxThreshold } - return int(int64(schedulerapi.MaxPriority) * (sumScores - minThreshold) / (maxThreshold - minThreshold)) + return int(int64(framework.MaxNodeScore) * (sumScores - minThreshold) / (maxThreshold - minThreshold)) } // sumImageScores returns the sum of image scores of all the containers that are already on the node. 
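Note (illustration only, not part of the patch; hypothetical names): the reduce step added in even_pods_spread.go above flips raw "matching pod" counts so that nodes with fewer matching pods receive a higher normalized score, scaled to [0, MaxNodeScore]. A minimal, self-contained Go sketch of that arithmetic follows; it assumes MaxNodeScore is 10 and simplifies by skipping the nodeNameSet qualification check and the sharedLister lookup used in the real code.

package main

import "fmt"

// maxNodeScore stands in for framework.MaxNodeScore; 10 is assumed here for illustration.
const maxNodeScore = 10

// normalize mirrors the flipped normalization in CalculateEvenPodsSpreadPriorityReduce:
// score_i = maxNodeScore * (total - count_i) / (total - minCount), with the degenerate
// case (all counts equal) collapsing to maxNodeScore for every node.
func normalize(counts []int64) []int64 {
	if len(counts) == 0 {
		return nil
	}
	var total int64
	minCount := counts[0]
	for _, c := range counts {
		total += c
		if c < minCount {
			minCount = c
		}
	}
	maxMinDiff := total - minCount
	scores := make([]int64, len(counts))
	for i, c := range counts {
		if maxMinDiff == 0 {
			scores[i] = maxNodeScore
			continue
		}
		flipped := total - c
		scores[i] = int64(float64(maxNodeScore) * float64(flipped) / float64(maxMinDiff))
	}
	return scores
}

func main() {
	// Three topology domains holding 0, 2 and 4 matching pods.
	fmt.Println(normalize([]int64{0, 2, 4})) // prints [10 6 3]
}

The worked numbers show the intent of the change: the emptiest domain gets the full score, and more crowded domains are pushed toward zero before the scheduler combines this with the other priority functions.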
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go index f720320a710..0e45781ce57 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -18,50 +18,35 @@ package priorities import ( "context" - "sync/atomic" + "fmt" + "sync" - "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/klog" ) -// InterPodAffinity contains information to calculate inter pod affinity. -type InterPodAffinity struct { - info predicates.NodeInfo - hardPodAffinityWeight int32 -} - -// NewInterPodAffinityPriority creates an InterPodAffinity. -func NewInterPodAffinityPriority( - info predicates.NodeInfo, - hardPodAffinityWeight int32) PriorityFunction { - interPodAffinity := &InterPodAffinity{ - info: info, - hardPodAffinityWeight: hardPodAffinityWeight, - } - return interPodAffinity.CalculateInterPodAffinityPriority -} +type topologyPairToScore map[string]map[string]int64 type podAffinityPriorityMap struct { - // nodes contain all nodes that should be considered + // nodes contain all nodes that should be considered. nodes []*v1.Node - // counts store the mapping from node name to so-far computed score of - // the node. - counts map[string]*int64 + // tracks a topology pair score so far. + topologyScore topologyPairToScore + sync.Mutex } func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap { return &podAffinityPriorityMap{ - nodes: nodes, - counts: make(map[string]*int64, len(nodes)), + nodes: nodes, + topologyScore: make(topologyPairToScore), } } @@ -71,13 +56,19 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini if err != nil { return err } + if len(fixedNode.Labels) == 0 { + return nil + } + match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector) - if match { - for _, node := range p.nodes { - if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) { - atomic.AddInt64(p.counts[node.Name], weight) - } + tpValue, tpValueExist := fixedNode.Labels[term.TopologyKey] + if match && tpValueExist { + p.Lock() + if p.topologyScore[term.TopologyKey] == nil { + p.topologyScore[term.TopologyKey] = make(map[string]int64) } + p.topologyScore[term.TopologyKey][tpValue] += weight + p.Unlock() } return nil } @@ -92,44 +83,107 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm return nil } -// CalculateInterPodAffinityPriority compute a sum by iterating through the elements of weightedPodAffinityTerm and adding -// "weight" to the sum if the corresponding PodAffinityTerm is satisfied for -// that node; the node(s) with the highest sum are the most preferred. 
-// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, -// symmetry need to be considered for hard requirements from podAffinity -func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +// CalculateInterPodAffinityPriorityMap calculate the number of matching pods on the passed-in "node", +// and return the number as Score. +func CalculateInterPodAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { + node := nodeInfo.Node() + if node == nil { + return framework.NodeScore{}, fmt.Errorf("node not found") + } + + var topologyScore topologyPairToScore + if priorityMeta, ok := meta.(*priorityMetadata); ok { + topologyScore = priorityMeta.topologyScore + } + + var score int64 + for tpKey, tpValues := range topologyScore { + if v, exist := node.Labels[tpKey]; exist { + score += tpValues[v] + } + } + + return framework.NodeScore{Name: node.Name, Score: score}, nil +} + +// CalculateInterPodAffinityPriorityReduce normalizes the score for each filteredNode, +// The basic rule is: the bigger the score(matching number of pods) is, the smaller the +// final normalized score will be. +func CalculateInterPodAffinityPriorityReduce(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, + result framework.NodeScoreList) error { + var topologyScore topologyPairToScore + if priorityMeta, ok := meta.(*priorityMetadata); ok { + topologyScore = priorityMeta.topologyScore + } + if len(topologyScore) == 0 { + return nil + } + + var maxCount, minCount int64 + for i := range result { + score := result[i].Score + if score > maxCount { + maxCount = score + } + if score < minCount { + minCount = score + } + } + + maxMinDiff := maxCount - minCount + for i := range result { + fScore := float64(0) + if maxMinDiff > 0 { + fScore = float64(framework.MaxNodeScore) * (float64(result[i].Score-minCount) / float64(maxMinDiff)) + } + + result[i].Score = int64(fScore) + } + + return nil +} + +func buildTopologyPairToScore( + pod *v1.Pod, + sharedLister schedulerlisters.SharedLister, + filteredNodes []*v1.Node, + hardPodAffinityWeight int32, +) topologyPairToScore { + if sharedLister == nil { + klog.Error("BuildTopologyPairToScore with empty shared lister") + return nil + } + affinity := pod.Spec.Affinity hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil - // priorityMap stores the mapping from node name to so-far computed score of - // the node. - pm := newPodAffinityPriorityMap(nodes) - allNodeNames := make([]string, 0, len(nodeNameToInfo)) - lazyInit := hasAffinityConstraints || hasAntiAffinityConstraints - for name := range nodeNameToInfo { - allNodeNames = append(allNodeNames, name) - // if pod has affinity defined, or target node has affinityPods - if lazyInit || len(nodeNameToInfo[name].PodsWithAffinity()) != 0 { - pm.counts[name] = new(int64) + // pm stores (1) all nodes that should be considered and (2) the so-far computed score for each node. 
+ pm := newPodAffinityPriorityMap(filteredNodes) + + allNodes, err := sharedLister.NodeInfos().HavePodsWithAffinityList() + if err != nil { + klog.Errorf("get pods with affinity list error, err: %v", err) + return nil + } + if hasAffinityConstraints || hasAntiAffinityConstraints { + allNodes, err = sharedLister.NodeInfos().List() + if err != nil { + klog.Errorf("get all nodes from shared lister error, err: %v", err) + return nil } } - // convert the topology key based weights to the node name based weights - var maxCount, minCount int64 - processPod := func(existingPod *v1.Pod) error { - existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName) + existingPodNodeInfo, err := sharedLister.NodeInfos().Get(existingPod.Spec.NodeName) if err != nil { - if apierrors.IsNotFound(err) { - klog.Errorf("Node not found, %v", existingPod.Spec.NodeName) - return nil - } - return err + klog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + return nil } existingPodAffinity := existingPod.Spec.Affinity existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil + existingPodNode := existingPodNodeInfo.Node() if hasAffinityConstraints { // For every soft pod affinity term of , if matches the term, @@ -154,14 +208,14 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node // For every hard pod affinity term of , if matches the term, // increment for every node in the cluster with the same // value as that of 's node by the constant - if ipa.hardPodAffinityWeight > 0 { + if hardPodAffinityWeight > 0 { terms := existingPodAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. //if len(existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { // terms = append(terms, existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...) //} for _, term := range terms { - if err := pm.processTerm(&term, existingPod, pod, existingPodNode, int64(ipa.hardPodAffinityWeight)); err != nil { + if err := pm.processTerm(&term, existingPod, pod, existingPodNode, int64(hardPodAffinityWeight)); err != nil { return err } } @@ -189,7 +243,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node errCh := schedutil.NewErrorChannel() ctx, cancel := context.WithCancel(context.Background()) processNode := func(i int) { - nodeInfo := nodeNameToInfo[allNodeNames[i]] + nodeInfo := allNodes[i] if nodeInfo.Node() != nil { if hasAffinityConstraints || hasAntiAffinityConstraints { // We need to process all the pods. 
@@ -211,35 +265,11 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node } } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode) + workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode) if err := errCh.ReceiveError(); err != nil { - return nil, err - } - - for _, node := range nodes { - if pm.counts[node.Name] == nil { - continue - } - if *pm.counts[node.Name] > maxCount { - maxCount = *pm.counts[node.Name] - } - if *pm.counts[node.Name] < minCount { - minCount = *pm.counts[node.Name] - } + klog.Error(err) + return nil } - // calculate final priority score for each node - result := make(schedulerapi.HostPriorityList, 0, len(nodes)) - maxMinDiff := maxCount - minCount - for _, node := range nodes { - fScore := float64(0) - if maxMinDiff > 0 && pm.counts[node.Name] != nil { - fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount)) - } - result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) - if klog.V(10) { - klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore)) - } - } - return result, nil + return pm.topologyScore } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go index c7e37eb5e83..371291b99b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go @@ -16,9 +16,7 @@ limitations under the License. package priorities -import ( - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" -) +import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" var ( leastRequestedRatioResources = DefaultRequestedRatioResources @@ -54,5 +52,5 @@ func leastRequestedScore(requested, capacity int64) int64 { return 0 } - return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity + return ((capacity - requested) * int64(framework.MaxNodeScore)) / capacity } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go index 9225b68cafd..fa39fdb3f75 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go @@ -20,25 +20,36 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/klog" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) -// PriorityMetadataFactory is a factory to produce PriorityMetadata. -type PriorityMetadataFactory struct { - serviceLister algorithm.ServiceLister - controllerLister algorithm.ControllerLister - replicaSetLister algorithm.ReplicaSetLister - statefulSetLister algorithm.StatefulSetLister +// MetadataFactory is a factory to produce PriorityMetadata. 
+type MetadataFactory struct { + serviceLister corelisters.ServiceLister + controllerLister corelisters.ReplicationControllerLister + replicaSetLister appslisters.ReplicaSetLister + statefulSetLister appslisters.StatefulSetLister + hardPodAffinityWeight int32 } -// NewPriorityMetadataFactory creates a PriorityMetadataFactory. -func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) PriorityMetadataProducer { - factory := &PriorityMetadataFactory{ - serviceLister: serviceLister, - controllerLister: controllerLister, - replicaSetLister: replicaSetLister, - statefulSetLister: statefulSetLister, +// NewMetadataFactory creates a MetadataFactory. +func NewMetadataFactory( + serviceLister corelisters.ServiceLister, + controllerLister corelisters.ReplicationControllerLister, + replicaSetLister appslisters.ReplicaSetLister, + statefulSetLister appslisters.StatefulSetLister, + hardPodAffinityWeight int32, +) MetadataProducer { + factory := &MetadataFactory{ + serviceLister: serviceLister, + controllerLister: controllerLister, + replicaSetLister: replicaSetLister, + statefulSetLister: statefulSetLister, + hardPodAffinityWeight: hardPodAffinityWeight, } return factory.PriorityMetadata } @@ -48,68 +59,100 @@ type priorityMetadata struct { podLimits *schedulernodeinfo.Resource podTolerations []v1.Toleration affinity *v1.Affinity - podSelectors []labels.Selector + podSelector labels.Selector controllerRef *metav1.OwnerReference podFirstServiceSelector labels.Selector totalNumNodes int + podTopologySpreadMap *podTopologySpreadMap + topologyScore topologyPairToScore } -// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil. -func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} { +// PriorityMetadata is a MetadataProducer. Node info can be nil. +func (pmf *MetadataFactory) PriorityMetadata( + pod *v1.Pod, + filteredNodes []*v1.Node, + sharedLister schedulerlisters.SharedLister, +) interface{} { // If we cannot compute metadata, just return nil if pod == nil { return nil } + totalNumNodes := 0 + var allNodes []*schedulernodeinfo.NodeInfo + if sharedLister != nil { + if l, err := sharedLister.NodeInfos().List(); err == nil { + totalNumNodes = len(l) + allNodes = l + } + } + tpSpreadMap, err := buildPodTopologySpreadMap(pod, filteredNodes, allNodes) + if err != nil { + klog.Errorf("Error building podTopologySpreadMap: %v", err) + return nil + } return &priorityMetadata{ podLimits: getResourceLimits(pod), podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations), affinity: pod.Spec.Affinity, - podSelectors: getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister), + podSelector: getSelector(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister), controllerRef: metav1.GetControllerOf(pod), podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister), - totalNumNodes: len(nodeNameToInfo), + totalNumNodes: totalNumNodes, + podTopologySpreadMap: tpSpreadMap, + topologyScore: buildTopologyPairToScore(pod, sharedLister, filteredNodes, pmf.hardPodAffinityWeight), } } // getFirstServiceSelector returns one selector of services the given pod. 
-func getFirstServiceSelector(pod *v1.Pod, sl algorithm.ServiceLister) (firstServiceSelector labels.Selector) { +func getFirstServiceSelector(pod *v1.Pod, sl corelisters.ServiceLister) (firstServiceSelector labels.Selector) { if services, err := sl.GetPodServices(pod); err == nil && len(services) > 0 { return labels.SelectorFromSet(services[0].Spec.Selector) } return nil } -// getSelectors returns selectors of services, RCs and RSs matching the given pod. -func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector { - var selectors []labels.Selector +// getSelector returns a selector for the services, RCs, RSs, and SSs matching the given pod. +func getSelector(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) labels.Selector { + labelSet := make(labels.Set) + // Since services, RCs, RSs and SSs match the pod, they won't have conflicting + // labels. Merging is safe. if services, err := sl.GetPodServices(pod); err == nil { for _, service := range services { - selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector)) + labelSet = labels.Merge(labelSet, service.Spec.Selector) } } if rcs, err := cl.GetPodControllers(pod); err == nil { for _, rc := range rcs { - selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector)) + labelSet = labels.Merge(labelSet, rc.Spec.Selector) } } + selector := labels.NewSelector() + if len(labelSet) != 0 { + selector = labelSet.AsSelector() + } + if rss, err := rsl.GetPodReplicaSets(pod); err == nil { for _, rs := range rss { - if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { - selectors = append(selectors, selector) + if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { + if r, ok := other.Requirements(); ok { + selector = selector.Add(r...) + } } } } if sss, err := ssl.GetPodStatefulSets(pod); err == nil { for _, ss := range sss { - if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { - selectors = append(selectors, selector) + if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { + if r, ok := other.Requirements(); ok { + selector = selector.Add(r...) + } } } } - return selectors + return selector } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go index d7a69934b1f..5f65ff88b09 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go @@ -16,9 +16,7 @@ limitations under the License. 
package priorities -import ( - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" -) +import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" var ( mostRequestedRatioResources = DefaultRequestedRatioResources @@ -57,5 +55,5 @@ func mostRequestedScore(requested, capacity int64) int64 { return 0 } - return (requested * schedulerapi.MaxPriority) / capacity + return (requested * framework.MaxNodeScore) / capacity } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_affinity.go index 870649d72fb..d3545584f80 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -19,10 +19,10 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -31,10 +31,10 @@ import ( // it will get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher // score the node gets. -func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } // default is the podspec. @@ -59,7 +59,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s // TODO: Avoid computing it for all nodes if this becomes a performance problem. nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions) if err != nil { - return schedulerapi.HostPriority{}, err + return framework.NodeScore{}, err } if nodeSelector.Matches(labels.Set(node.Labels)) { count += preferredSchedulingTerm.Weight @@ -67,11 +67,11 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s } } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: int(count), + return framework.NodeScore{ + Name: node.Name, + Score: int64(count), }, nil } // CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation. 
-var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false) +var CalculateNodeAffinityPriorityReduce = NormalizeReduce(framework.MaxNodeScore, false) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_label.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_label.go index 2cedd13142b..a9bedd51c6e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_label.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_label.go @@ -19,23 +19,23 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // NodeLabelPrioritizer contains information to calculate node label priority. type NodeLabelPrioritizer struct { - label string - presence bool + presentLabelsPreference []string + absentLabelsPreference []string } // NewNodeLabelPriority creates a NodeLabelPrioritizer. -func NewNodeLabelPriority(label string, presence bool) (PriorityMapFunction, PriorityReduceFunction) { +func NewNodeLabelPriority(presentLabelsPreference []string, absentLabelsPreference []string) (PriorityMapFunction, PriorityReduceFunction) { labelPrioritizer := &NodeLabelPrioritizer{ - label: label, - presence: presence, + presentLabelsPreference: presentLabelsPreference, + absentLabelsPreference: absentLabelsPreference, } return labelPrioritizer.CalculateNodeLabelPriorityMap, nil } @@ -43,19 +43,28 @@ func NewNodeLabelPriority(label string, presence bool) (PriorityMapFunction, Pri // CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value. // If presence is true, prioritizes nodes that have the specified label, regardless of value. // If presence is false, prioritizes nodes that do not have the specified label. -func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } - exists := labels.Set(node.Labels).Has(n.label) - score := 0 - if (exists && n.presence) || (!exists && !n.presence) { - score = schedulerapi.MaxPriority + score := int64(0) + for _, label := range n.presentLabelsPreference { + if labels.Set(node.Labels).Has(label) { + score += framework.MaxNodeScore + } } - return schedulerapi.HostPriority{ - Host: node.Name, + for _, label := range n.absentLabelsPreference { + if !labels.Set(node.Labels).Has(label) { + score += framework.MaxNodeScore + } + } + // Take average score for each label to ensure the score doesn't exceed MaxNodeScore. 
+ score /= int64(len(n.presentLabelsPreference) + len(n.absentLabelsPreference)) + + return framework.NodeScore{ + Name: node.Name, Score: score, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index 8af4ce15c04..f7c17164ca8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -19,19 +19,19 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation // "scheduler.alpha.kubernetes.io/preferAvoidPods". -func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } var controllerRef *metav1.OwnerReference if priorityMeta, ok := meta.(*priorityMetadata); ok { @@ -49,19 +49,19 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node } } if controllerRef == nil { - return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil + return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil } avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations) if err != nil { // If we cannot get annotation, assume it's schedulable there. 
- return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil + return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil } for i := range avoids.PreferAvoidPods { avoid := &avoids.PreferAvoidPods[i] if avoid.PodSignature.PodController.Kind == controllerRef.Kind && avoid.PodSignature.PodController.UID == controllerRef.UID { - return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil + return framework.NodeScore{Name: node.Name, Score: 0}, nil } } - return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil + return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/reduce.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/reduce.go index 416724cbea5..b65c6f9b36e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/reduce.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/reduce.go @@ -18,21 +18,21 @@ package priorities import ( "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) // NormalizeReduce generates a PriorityReduceFunction that can normalize the result // scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by // subtracting it from maxPriority. -func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction { +func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction { return func( _ *v1.Pod, _ interface{}, - _ map[string]*schedulernodeinfo.NodeInfo, - result schedulerapi.HostPriorityList) error { + _ schedulerlisters.SharedLister, + result framework.NodeScoreList) error { - var maxCount int + var maxCount int64 for i := range result { if result[i].Score > maxCount { maxCount = result[i].Score diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go index a4e58382355..f06eb0b938a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go @@ -21,7 +21,7 @@ import ( "math" "k8s.io/klog" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) // FunctionShape represents shape of scoring function. 
@@ -38,14 +38,23 @@ type FunctionShapePoint struct { var ( // give priority to least utilized nodes by default - defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{{0, 10}, {100, 0}}) + defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{ + { + Utilization: 0, + Score: framework.MaxNodeScore, + }, + { + Utilization: 100, + Score: framework.MinNodeScore, + }, + }) ) const ( minUtilization = 0 maxUtilization = 100 minScore = 0 - maxScore = schedulerapi.MaxPriority + maxScore = framework.MaxNodeScore ) // NewFunctionShape creates instance of FunctionShape in a safe way performing all diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go index c6563a5dac6..216f76c6743 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go @@ -25,7 +25,7 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -50,13 +50,13 @@ var DefaultRequestedRatioResources = ResourceToWeightMap{v1.ResourceMemory: 1, v func (r *ResourceAllocationPriority) PriorityMap( pod *v1.Pod, meta interface{}, - nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { + nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } if r.resourceToWeightMap == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("resources not found") + return framework.NodeScore{}, fmt.Errorf("resources not found") } requested := make(ResourceToValueMap, len(r.resourceToWeightMap)) allocatable := make(ResourceToValueMap, len(r.resourceToWeightMap)) @@ -90,9 +90,9 @@ func (r *ResourceAllocationPriority) PriorityMap( } } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: int(score), + return framework.NodeScore{ + Name: node.Name, + Score: score, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go index 994f4be0c2b..7133f060dc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -19,8 +19,8 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + v1 "k8s.io/api/core/v1" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/klog" @@ -33,17 +33,17 @@ import ( // of the pod are satisfied, the node is assigned a score of 1. // Rationale of choosing the lowest score of 1 is that this is mainly selected to break ties between nodes that have // same scores assigned by one of least and most requested priority functions. 
-func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } allocatableResources := nodeInfo.AllocatableResource() // compute pod limits var podLimits *schedulernodeinfo.Resource - if priorityMeta, ok := meta.(*priorityMetadata); ok { + if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil { // We were able to parse metadata, use podLimits from there. podLimits = priorityMeta.podLimits } else { @@ -54,7 +54,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU) memScore := computeScore(podLimits.Memory, allocatableResources.Memory) - score := int(0) + score := int64(0) if cpuScore == 1 || memScore == 1 { score = 1 } @@ -71,8 +71,8 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule ) } - return schedulerapi.HostPriority{ - Host: node.Name, + return framework.NodeScore{ + Name: node.Name, Score: score, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go index 3faba83810e..a9293e1538c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -19,10 +19,12 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" utilnode "k8s.io/kubernetes/pkg/util/node" @@ -35,18 +37,18 @@ const zoneWeighting float64 = 2.0 / 3.0 // SelectorSpread contains information to calculate selector spread priority. type SelectorSpread struct { - serviceLister algorithm.ServiceLister - controllerLister algorithm.ControllerLister - replicaSetLister algorithm.ReplicaSetLister - statefulSetLister algorithm.StatefulSetLister + serviceLister corelisters.ServiceLister + controllerLister corelisters.ReplicationControllerLister + replicaSetLister appslisters.ReplicaSetLister + statefulSetLister appslisters.StatefulSetLister } // NewSelectorSpreadPriority creates a SelectorSpread. 
func NewSelectorSpreadPriority( - serviceLister algorithm.ServiceLister, - controllerLister algorithm.ControllerLister, - replicaSetLister algorithm.ReplicaSetLister, - statefulSetLister algorithm.StatefulSetLister) (PriorityMapFunction, PriorityReduceFunction) { + serviceLister corelisters.ServiceLister, + controllerLister corelisters.ReplicationControllerLister, + replicaSetLister appslisters.ReplicaSetLister, + statefulSetLister appslisters.StatefulSetLister) (PriorityMapFunction, PriorityReduceFunction) { selectorSpread := &SelectorSpread{ serviceLister: serviceLister, controllerLister: controllerLister, @@ -63,32 +65,24 @@ func NewSelectorSpreadPriority( // It favors nodes that have fewer existing matching pods. // i.e. it pushes the scheduler towards a node where there's the smallest number of // pods which match the same service, RC,RSs or StatefulSets selectors as the pod being scheduled. -func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { - var selectors []labels.Selector +func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { + var selector labels.Selector node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } priorityMeta, ok := meta.(*priorityMetadata) if ok { - selectors = priorityMeta.podSelectors + selector = priorityMeta.podSelector } else { - selectors = getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister) + selector = getSelector(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister) } - if len(selectors) == 0 { - return schedulerapi.HostPriority{ - Host: node.Name, - Score: int(0), - }, nil - } - - count := countMatchingPods(pod.Namespace, selectors, nodeInfo) - - return schedulerapi.HostPriority{ - Host: node.Name, - Score: count, + count := countMatchingPods(pod.Namespace, selector, nodeInfo) + return framework.NodeScore{ + Name: node.Name, + Score: int64(count), }, nil } @@ -96,16 +90,20 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{ // based on the number of existing matching pods on the node // where zone information is included on the nodes, it favors nodes // in zones with fewer existing matching pods. 
-func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { - countsByZone := make(map[string]int, 10) - maxCountByZone := int(0) - maxCountByNodeName := int(0) +func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, result framework.NodeScoreList) error { + countsByZone := make(map[string]int64, 10) + maxCountByZone := int64(0) + maxCountByNodeName := int64(0) for i := range result { if result[i].Score > maxCountByNodeName { maxCountByNodeName = result[i].Score } - zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node()) + nodeInfo, err := sharedLister.NodeInfos().Get(result[i].Name) + if err != nil { + return err + } + zoneID := utilnode.GetZoneKey(nodeInfo.Node()) if zoneID == "" { continue } @@ -122,29 +120,34 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa maxCountByNodeNameFloat64 := float64(maxCountByNodeName) maxCountByZoneFloat64 := float64(maxCountByZone) - MaxPriorityFloat64 := float64(schedulerapi.MaxPriority) + MaxNodeScoreFloat64 := float64(framework.MaxNodeScore) for i := range result { // initializing to the default/max node score of maxPriority - fScore := MaxPriorityFloat64 + fScore := MaxNodeScoreFloat64 if maxCountByNodeName > 0 { - fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64) + fScore = MaxNodeScoreFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64) } // If there is zone information present, incorporate it if haveZones { - zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node()) + nodeInfo, err := sharedLister.NodeInfos().Get(result[i].Name) + if err != nil { + return err + } + + zoneID := utilnode.GetZoneKey(nodeInfo.Node()) if zoneID != "" { - zoneScore := MaxPriorityFloat64 + zoneScore := MaxNodeScoreFloat64 if maxCountByZone > 0 { - zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64) + zoneScore = MaxNodeScoreFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64) } fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore) } } - result[i].Score = int(fScore) + result[i].Score = int64(fScore) if klog.V(10) { klog.Infof( - "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore), + "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Name, int64(fScore), ) } } @@ -153,39 +156,24 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa // ServiceAntiAffinity contains information to calculate service anti-affinity priority. type ServiceAntiAffinity struct { - podLister algorithm.PodLister - serviceLister algorithm.ServiceLister - label string + podLister schedulerlisters.PodLister + serviceLister corelisters.ServiceLister + labels []string } // NewServiceAntiAffinityPriority creates a ServiceAntiAffinity. 
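The reduce pass above normalizes per-node counts into scores and blends in a per-zone score weighted by zoneWeighting = 2/3. The following standalone sketch isolates just that arithmetic so it can be sanity-checked; the constant value 100 for framework.MaxNodeScore is an assumption mirroring this tree, and the zero-count guards of the real code are omitted for brevity:

    package main

    import "fmt"

    const (
        maxNodeScore  = 100.0     // assumed value of framework.MaxNodeScore
        zoneWeighting = 2.0 / 3.0 // same constant as in selector_spreading.go
    )

    // spreadScore mirrors the normalization in CalculateSpreadPriorityReduce: fewer
    // matching pods on a node (and in its zone) yields a higher score.
    func spreadScore(countOnNode, maxCountByNodeName, countInZone, maxCountByZone int64) int64 {
        fScore := maxNodeScore * float64(maxCountByNodeName-countOnNode) / float64(maxCountByNodeName)
        zoneScore := maxNodeScore * float64(maxCountByZone-countInZone) / float64(maxCountByZone)
        return int64(fScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore)
    }

    func main() {
        // 1 matching pod on this node (node max is 4), 2 in its zone (zone max is 6):
        // 75*(1/3) + 66.67*(2/3) ≈ 69.
        fmt.Println(spreadScore(1, 4, 2, 6)) // prints 69
    }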
-func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (PriorityMapFunction, PriorityReduceFunction) { +func NewServiceAntiAffinityPriority(podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (PriorityMapFunction, PriorityReduceFunction) { antiAffinity := &ServiceAntiAffinity{ podLister: podLister, serviceLister: serviceLister, - label: label, + labels: labels, } return antiAffinity.CalculateAntiAffinityPriorityMap, antiAffinity.CalculateAntiAffinityPriorityReduce } -// Classifies nodes into ones with labels and without labels. -func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (map[string]string, []string) { - labeledNodes := map[string]string{} - nonLabeledNodes := []string{} - for _, node := range nodes { - if labels.Set(node.Labels).Has(s.label) { - label := labels.Set(node.Labels).Get(s.label) - labeledNodes[node.Name] = label - } else { - nonLabeledNodes = append(nonLabeledNodes, node.Name) - } - } - return labeledNodes, nonLabeledNodes -} - -// countMatchingPods cout pods based on namespace and matching all selectors -func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int { - if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || len(selectors) == 0 { +// countMatchingPods counts pods based on namespace and matching all selectors +func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int { + if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector.Empty() { return 0 } count := 0 @@ -193,14 +181,7 @@ func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo * // Ignore pods being deleted for spreading purposes // Similar to how it is done for SelectorSpreadPriority if namespace == pod.Namespace && pod.DeletionTimestamp == nil { - matches := true - for _, selector := range selectors { - if !selector.Matches(labels.Set(pod.Labels)) { - matches = false - break - } - } - if matches { + if selector.Matches(labels.Set(pod.Labels)) { count++ } } @@ -210,12 +191,12 @@ func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo * // CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service // on given machine -func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { var firstServiceSelector labels.Selector node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } priorityMeta, ok := meta.(*priorityMetadata) if ok { @@ -223,54 +204,86 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta } else { firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister) } - //pods matched namespace,selector on current node - var selectors []labels.Selector + // Pods matched namespace,selector on current node. 
+ var selector labels.Selector if firstServiceSelector != nil { - selectors = append(selectors, firstServiceSelector) + selector = firstServiceSelector + } else { + selector = labels.NewSelector() } - score := countMatchingPods(pod.Namespace, selectors, nodeInfo) + score := countMatchingPods(pod.Namespace, selector, nodeInfo) - return schedulerapi.HostPriority{ - Host: node.Name, - Score: score, + return framework.NodeScore{ + Name: node.Name, + Score: int64(score), }, nil } // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label. // The label to be considered is provided to the struct (ServiceAntiAffinity). -func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { - var numServicePods int - var label string - podCounts := map[string]int{} +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, result framework.NodeScoreList) error { + reduceResult := make([]float64, len(result)) + for _, label := range s.labels { + if err := s.updateNodeScoresForLabel(sharedLister, result, reduceResult, label); err != nil { + return err + } + } + + // Update the result after all labels have been evaluated. + for i, nodeScore := range reduceResult { + result[i].Score = int64(nodeScore) + } + return nil +} + +// updateNodeScoresForLabel updates the node scores for a single label. Note it does not update the +// original result from the map phase directly, but instead updates the reduceResult, which is used +// to update the original result finally. This makes sure that each call to updateNodeScoresForLabel +// receives the same mapResult to work with. +// Why are doing this? This is a workaround for the migration from priorities to score plugins. +// Historically the priority is designed to handle only one label, and multiple priorities are configured +// to work with multiple labels. Using multiple plugins is not allowed in the new framework. Therefore +// we need to modify the old priority to be able to handle multiple labels so that it can be mapped +// to a single plugin. This will be deprecated soon. 
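// For example (label set and counts invented for illustration): with s.labels =
// []string{"zone", "rack"}, a node whose "zone" pass computes fScore 80 and whose
// "rack" pass computes fScore 40 accumulates reduceResult[i] = 80/2 + 40/2 = 60,
// which CalculateAntiAffinityPriorityReduce then writes back as result[i].Score.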
+func (s *ServiceAntiAffinity) updateNodeScoresForLabel(sharedLister schedulerlisters.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error { + var numServicePods int64 + var labelValue string + podCounts := map[string]int64{} labelNodesStatus := map[string]string{} - maxPriorityFloat64 := float64(schedulerapi.MaxPriority) + maxPriorityFloat64 := float64(framework.MaxNodeScore) - for _, hostPriority := range result { + for _, hostPriority := range mapResult { numServicePods += hostPriority.Score - if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) { + nodeInfo, err := sharedLister.NodeInfos().Get(hostPriority.Name) + if err != nil { + return err + } + if !labels.Set(nodeInfo.Node().Labels).Has(label) { continue } - label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label) - labelNodesStatus[hostPriority.Host] = label - podCounts[label] += hostPriority.Score + + labelValue = labels.Set(nodeInfo.Node().Labels).Get(label) + labelNodesStatus[hostPriority.Name] = labelValue + podCounts[labelValue] += hostPriority.Score } //score int - scale of 0-maxPriority // 0 being the lowest priority and maxPriority being the highest - for i, hostPriority := range result { - label, ok := labelNodesStatus[hostPriority.Host] + for i, hostPriority := range mapResult { + labelValue, ok := labelNodesStatus[hostPriority.Name] if !ok { - result[i].Host = hostPriority.Host - result[i].Score = int(0) continue } // initializing to the default/max node score of maxPriority fScore := maxPriorityFloat64 if numServicePods > 0 { - fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods)) + fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[labelValue]) / float64(numServicePods)) } - result[i].Host = hostPriority.Host - result[i].Score = int(fScore) + // The score of current label only accounts for 1/len(s.labels) of the total score. + // The policy API definition only allows a single label to be configured, associated with a weight. + // This is compensated by the fact that the total weight is the sum of all weights configured + // in each policy config. 
+ reduceResult[i] += fScore / float64(len(s.labels)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/taint_toleration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/taint_toleration.go index 85be011cabe..a633635d644 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -19,9 +19,9 @@ package priorities import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -52,10 +52,10 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi } // ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node -func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { +func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) { node := nodeInfo.Node() if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + return framework.NodeScore{}, fmt.Errorf("node not found") } // To hold all the tolerations with Effect PreferNoSchedule var tolerationsPreferNoSchedule []v1.Toleration @@ -66,11 +66,11 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo * tolerationsPreferNoSchedule = getAllTolerationPreferNoSchedule(pod.Spec.Tolerations) } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule), + return framework.NodeScore{ + Name: node.Name, + Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)), }, nil } // ComputeTaintTolerationPriorityReduce calculates the source of each node based on the number of intolerable taints on the node -var ComputeTaintTolerationPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, true) +var ComputeTaintTolerationPriorityReduce = NormalizeReduce(framework.MaxNodeScore, true) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go index 8c0f8c0c4b2..b7841ded65a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go @@ -17,11 +17,13 @@ limitations under the License. 
package priorities import ( - "k8s.io/api/core/v1" + "sort" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) func makeNode(node string, milliCPU, memory int64) *v1.Node { @@ -56,21 +58,32 @@ func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedR } } -func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}) PriorityFunction { - return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { - result := make(schedulerapi.HostPriorityList, 0, len(nodes)) - for i := range nodes { - hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name]) - if err != nil { - return nil, err - } - result = append(result, hostResult) +func runMapReducePriority(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}, pod *v1.Pod, sharedLister schedulerlisters.SharedLister, nodes []*v1.Node) (framework.NodeScoreList, error) { + result := make(framework.NodeScoreList, 0, len(nodes)) + for i := range nodes { + nodeInfo, err := sharedLister.NodeInfos().Get(nodes[i].Name) + if err != nil { + return nil, err + } + hostResult, err := mapFn(pod, metaData, nodeInfo) + if err != nil { + return nil, err } - if reduceFn != nil { - if err := reduceFn(pod, metaData, nodeNameToInfo, result); err != nil { - return nil, err - } + result = append(result, hostResult) + } + if reduceFn != nil { + if err := reduceFn(pod, metaData, sharedLister, result); err != nil { + return nil, err } - return result, nil } + return result, nil +} + +func sortNodeScoreList(out framework.NodeScoreList) { + sort.Slice(out, func(i, j int) bool { + if out[i].Score == out[j].Score { + return out[i].Name < out[j].Name + } + return out[i].Score < out[j].Score + }) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/types.go index 6c98a780aee..6163ba0f530 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/types.go @@ -17,43 +17,36 @@ limitations under the License. package priorities import ( - "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + v1 "k8s.io/api/core/v1" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // PriorityMapFunction is a function that computes per-node results for a given node. // TODO: Figure out the exact API of this method. // TODO: Change interface{} to a specific type. -type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) +type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) // PriorityReduceFunction is a function that aggregated per-node results and computes // final scores for all nodes. // TODO: Figure out the exact API of this method. 
// TODO: Change interface{} to a specific type. -type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error +type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, result framework.NodeScoreList) error -// PriorityMetadataProducer is a function that computes metadata for a given pod. This +// MetadataProducer is a function that computes metadata for a given pod. This // is now used for only for priority functions. For predicates please use PredicateMetadataProducer. -type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} - -// PriorityFunction is a function that computes scores for all nodes. -// DEPRECATED -// Use Map-Reduce pattern for priority functions. -type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) +type MetadataProducer func(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{} // PriorityConfig is a config used for a priority function. type PriorityConfig struct { Name string Map PriorityMapFunction Reduce PriorityReduceFunction - // TODO: Remove it after migrating all functions to - // Map-Reduce pattern. - Function PriorityFunction - Weight int + Weight int64 } -// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. -func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} { +// EmptyMetadataProducer returns a no-op MetadataProducer type. +func EmptyMetadataProducer(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{} { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go index 81dedd42928..9ebd8c9dbb2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go @@ -18,7 +18,7 @@ package algorithm import ( "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -34,12 +34,12 @@ type SchedulerExtender interface { // the list of failed nodes and failure reasons. Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, - ) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) + ) (filteredNodes []*v1.Node, failedNodesMap extenderv1.FailedNodesMap, err error) // Prioritize based on extender-implemented priority functions. The returned scores & weight // are used to compute the weighted score for an extender. The weighted scores are added to - // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. - Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) + // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. 
+ Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) // Bind delegates the action of binding a pod to a node to the extender. Bind(binding *v1.Binding) error @@ -61,9 +61,9 @@ type SchedulerExtender interface { // 2. A different set of victim pod for every given candidate node after preemption phase of extender. ProcessPreemption( pod *v1.Pod, - nodeToVictims map[*v1.Node]*schedulerapi.Victims, + nodeToVictims map[*v1.Node]*extenderv1.Victims, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, - ) (map[*v1.Node]*schedulerapi.Victims, error) + ) (map[*v1.Node]*extenderv1.Victims, error) // SupportsPreemption returns if the scheduler extender support preemption or not. SupportsPreemption() bool diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go index 38aea9d7ab6..1d8d0574404 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go @@ -17,68 +17,22 @@ limitations under the License. package algorithm import ( - apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" ) -// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are +// NodeFieldSelectorKeys is a map that: the keys are node field selector keys; the values are // the functions to get the value of the node field. var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{ - schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name }, + api.ObjectNameField: func(n *v1.Node) string { return n.Name }, } -// NodeLister interface represents anything that can list nodes for a scheduler. -type NodeLister interface { - // We explicitly return []*v1.Node, instead of v1.NodeList, to avoid - // performing expensive copies that are unneeded. - ListNodes() []*v1.Node -} - -// PodFilter is a function to filter a pod. If pod passed return true else return false. -type PodFilter func(*v1.Pod) bool - -// PodLister interface represents anything that can list pods for a scheduler. -type PodLister interface { - // We explicitly return []*v1.Pod, instead of v1.PodList, to avoid - // performing expensive copies that are unneeded. - List(labels.Selector) ([]*v1.Pod, error) - // This is similar to "List()", but the returned slice does not - // contain pods that don't pass `podFilter`. - FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) -} - -// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler. -type ServiceLister interface { - // Lists all the services - List(labels.Selector) ([]*v1.Service, error) - // Gets the services for the given pod - GetPodServices(*v1.Pod) ([]*v1.Service, error) -} - -// ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler. 
-type ControllerLister interface { - // Lists all the replication controllers - List(labels.Selector) ([]*v1.ReplicationController, error) - // Gets the services for the given pod - GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error) -} - -// ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler. -type ReplicaSetLister interface { - // Gets the replicasets for the given pod - GetPodReplicaSets(*v1.Pod) ([]*apps.ReplicaSet, error) -} - -// PDBLister interface represents anything that can list PodDisruptionBudget objects. -type PDBLister interface { - // List() returns a list of PodDisruptionBudgets matching the selector. - List(labels.Selector) ([]*policyv1beta1.PodDisruptionBudget, error) -} - -var _ ControllerLister = &EmptyControllerLister{} +var _ corelisters.ReplicationControllerLister = &EmptyControllerLister{} // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data type EmptyControllerLister struct{} @@ -93,28 +47,53 @@ func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1 return nil, nil } -var _ ReplicaSetLister = &EmptyReplicaSetLister{} +// ReplicationControllers returns nil +func (f EmptyControllerLister) ReplicationControllers(namespace string) corelisters.ReplicationControllerNamespaceLister { + return nil +} + +var _ appslisters.ReplicaSetLister = &EmptyReplicaSetLister{} // EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data type EmptyReplicaSetLister struct{} +// List returns nil +func (f EmptyReplicaSetLister) List(labels.Selector) ([]*appsv1.ReplicaSet, error) { + return nil, nil +} + // GetPodReplicaSets returns nil -func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*apps.ReplicaSet, err error) { +func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*appsv1.ReplicaSet, err error) { return nil, nil } +// ReplicaSets returns nil +func (f EmptyReplicaSetLister) ReplicaSets(namespace string) appslisters.ReplicaSetNamespaceLister { + return nil +} + // StatefulSetLister interface represents anything that can produce a list of StatefulSet; the list is consumed by a scheduler. type StatefulSetLister interface { // Gets the StatefulSet for the given pod. GetPodStatefulSets(*v1.Pod) ([]*apps.StatefulSet, error) } -var _ StatefulSetLister = &EmptyStatefulSetLister{} +var _ appslisters.StatefulSetLister = &EmptyStatefulSetLister{} // EmptyStatefulSetLister implements StatefulSetLister on []apps.StatefulSet returning empty data. type EmptyStatefulSetLister struct{} +// List returns nil +func (f EmptyStatefulSetLister) List(labels.Selector) ([]*appsv1.StatefulSet, error) { + return nil, nil +} + // GetPodStatefulSets of EmptyStatefulSetLister returns nil. 
-func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) { +func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*appsv1.StatefulSet, err error) { return nil, nil } + +// StatefulSets returns nil +func (f EmptyStatefulSetLister) StatefulSets(namespace string) appslisters.StatefulSetNamespaceLister { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm_factory.go similarity index 59% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm_factory.go index d07c747ae2c..6f08dbf3947 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm_factory.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package factory +package scheduler import ( "fmt" @@ -23,58 +23,48 @@ import ( "strings" "sync" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/client-go/informers" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/volumebinder" "k8s.io/klog" ) -// PluginFactoryArgs are passed to all plugin factory functions. -type PluginFactoryArgs struct { - PodLister algorithm.PodLister - ServiceLister algorithm.ServiceLister - ControllerLister algorithm.ControllerLister - ReplicaSetLister algorithm.ReplicaSetLister - StatefulSetLister algorithm.StatefulSetLister - NodeLister algorithm.NodeLister - PDBLister algorithm.PDBLister - NodeInfo predicates.NodeInfo - CSINodeInfo predicates.CSINodeInfo - PVInfo predicates.PersistentVolumeInfo - PVCInfo predicates.PersistentVolumeClaimInfo - StorageClassInfo predicates.StorageClassInfo +// AlgorithmFactoryArgs are passed to all factory functions. +type AlgorithmFactoryArgs struct { + SharedLister schedulerlisters.SharedLister + InformerFactory informers.SharedInformerFactory VolumeBinder *volumebinder.VolumeBinder HardPodAffinitySymmetricWeight int32 } -// PriorityMetadataProducerFactory produces PriorityMetadataProducer from the given args. -type PriorityMetadataProducerFactory func(PluginFactoryArgs) priorities.PriorityMetadataProducer +// PriorityMetadataProducerFactory produces MetadataProducer from the given args. +type PriorityMetadataProducerFactory func(AlgorithmFactoryArgs) priorities.MetadataProducer -// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args. -type PredicateMetadataProducerFactory func(PluginFactoryArgs) predicates.PredicateMetadataProducer +// PredicateMetadataProducerFactory produces MetadataProducer from the given args. 
+type PredicateMetadataProducerFactory func(AlgorithmFactoryArgs) predicates.MetadataProducer // FitPredicateFactory produces a FitPredicate from the given args. -type FitPredicateFactory func(PluginFactoryArgs) predicates.FitPredicate +type FitPredicateFactory func(AlgorithmFactoryArgs) predicates.FitPredicate -// PriorityFunctionFactory produces a PriorityConfig from the given args. -// DEPRECATED -// Use Map-Reduce pattern for priority functions. -type PriorityFunctionFactory func(PluginFactoryArgs) priorities.PriorityFunction - -// PriorityFunctionFactory2 produces map & reduce priority functions +// PriorityFunctionFactory produces map & reduce priority functions // from a given args. -// FIXME: Rename to PriorityFunctionFactory. -type PriorityFunctionFactory2 func(PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) +type PriorityFunctionFactory func(AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) // PriorityConfigFactory produces a PriorityConfig from the given function and weight type PriorityConfigFactory struct { - Function PriorityFunctionFactory - MapReduceFunction PriorityFunctionFactory2 - Weight int + MapReduceFunction PriorityFunctionFactory + Weight int64 } var ( @@ -87,8 +77,8 @@ var ( algorithmProviderMap = make(map[string]AlgorithmProviderConfig) // Registered metadata producers - priorityMetadataProducer PriorityMetadataProducerFactory - predicateMetadataProducer PredicateMetadataProducerFactory + priorityMetadataProducerFactory PriorityMetadataProducerFactory + predicateMetadataProducerFactory PredicateMetadataProducerFactory ) const ( @@ -110,8 +100,8 @@ type Snapshot struct { algorithmProviderMap map[string]AlgorithmProviderConfig } -// Copy returns a snapshot of current registered predicates and priorities. -func Copy() *Snapshot { +// RegisteredPredicatesAndPrioritiesSnapshot returns a snapshot of current registered predicates and priorities. +func RegisteredPredicatesAndPrioritiesSnapshot() *Snapshot { schedulerFactoryMutex.RLock() defer schedulerFactoryMutex.RUnlock() @@ -146,8 +136,8 @@ func Copy() *Snapshot { return © } -// Apply sets state of predicates and priorities to `s`. -func Apply(s *Snapshot) { +// ApplyPredicatesAndPriorities sets state of predicates and priorities to `s`. +func ApplyPredicatesAndPriorities(s *Snapshot) { schedulerFactoryMutex.Lock() fitPredicateMap = s.fitPredicateMap mandatoryFitPredicates = s.mandatoryFitPredicates @@ -159,7 +149,7 @@ func Apply(s *Snapshot) { // RegisterFitPredicate registers a fit predicate with the algorithm // registry. Returns the name with which the predicate was registered. func RegisterFitPredicate(name string, predicate predicates.FitPredicate) string { - return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) predicates.FitPredicate { return predicate }) + return RegisterFitPredicateFactory(name, func(AlgorithmFactoryArgs) predicates.FitPredicate { return predicate }) } // RemoveFitPredicate removes a fit predicate from factory. 
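Because registration now lives in the scheduler package and the factories take AlgorithmFactoryArgs, wiring a custom priority into the registry goes through RegisterPriorityMapReduceFunction (or RegisterPriorityConfigFactory for args-dependent ones). A minimal, self-contained sketch follows; the priority name, the scoring rule, and the weight are invented, and only the registration call itself reflects the API shown in this file:

    package main

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/kubernetes/pkg/scheduler"
        framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
        schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    // podCountMap is a hypothetical map function with the PriorityMapFunction signature
    // from earlier in this patch; nil-node handling is omitted for brevity.
    func podCountMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
        return framework.NodeScore{Name: nodeInfo.Node().Name, Score: int64(len(nodeInfo.Pods()))}, nil
    }

    func main() {
        // A nil reduce function is accepted, as the ResourceLimits registration later in
        // this patch also demonstrates; the weight of 1 is arbitrary.
        name := scheduler.RegisterPriorityMapReduceFunction("IllustrativePodCount", podCountMap, nil, 1)
        _ = name // the returned name is what algorithm-provider key sets refer to
    }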
@@ -180,7 +170,7 @@ func RemovePredicateKeyFromAlgoProvider(providerName, key string) error { validateAlgorithmNameOrDie(providerName) provider, ok := algorithmProviderMap[providerName] if !ok { - return fmt.Errorf("plugin %v has not been registered", providerName) + return fmt.Errorf("provider %v is not registered", providerName) } provider.FitPredicateKeys.Delete(key) return nil @@ -204,7 +194,7 @@ func InsertPredicateKeyToAlgoProvider(providerName, key string) error { validateAlgorithmNameOrDie(providerName) provider, ok := algorithmProviderMap[providerName] if !ok { - return fmt.Errorf("plugin %v has not been registered", providerName) + return fmt.Errorf("provider %v is not registered", providerName) } provider.FitPredicateKeys.Insert(key) return nil @@ -218,7 +208,6 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) { for _, provider := range algorithmProviderMap { provider.FitPredicateKeys.Insert(key) } - return } // InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap. @@ -229,7 +218,6 @@ func InsertPriorityKeyToAlgorithmProviderMap(key string) { for _, provider := range algorithmProviderMap { provider.PriorityFunctionKeys.Insert(key) } - return } // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by @@ -239,7 +227,7 @@ func RegisterMandatoryFitPredicate(name string, predicate predicates.FitPredicat schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() validateAlgorithmNameOrDie(name) - fitPredicateMap[name] = func(PluginFactoryArgs) predicates.FitPredicate { return predicate } + fitPredicateMap[name] = func(AlgorithmFactoryArgs) predicates.FitPredicate { return predicate } mandatoryFitPredicates.Insert(name) return name } @@ -256,46 +244,73 @@ func RegisterFitPredicateFactory(name string, predicateFactory FitPredicateFacto // RegisterCustomFitPredicate registers a custom fit predicate with the algorithm registry. // Returns the name, with which the predicate was registered. -func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string { +func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy, pluginArgs *plugins.ConfigProducerArgs) string { var predicateFactory FitPredicateFactory var ok bool + policyName := policy.Name validatePredicateOrDie(policy) // generate the predicate function, if a custom type is requested if policy.Argument != nil { if policy.Argument.ServiceAffinity != nil { - predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate { + // We use the ServiceAffinity predicate name for all ServiceAffinity custom predicates. + // It may get called multiple times but we essentially only register one instance of ServiceAffinity predicate. + // This name is then used to find the registered plugin and run the plugin instead of the predicate. + policyName = predicates.CheckServiceAffinityPred + + // map LabelsPresence policy to ConfigProducerArgs that's used to configure the ServiceAffinity plugin. + if pluginArgs.ServiceAffinityArgs == nil { + pluginArgs.ServiceAffinityArgs = &serviceaffinity.Args{} + } + + pluginArgs.ServiceAffinityArgs.AffinityLabels = append(pluginArgs.ServiceAffinityArgs.AffinityLabels, policy.Argument.ServiceAffinity.Labels...) 
+ + predicateFactory = func(args AlgorithmFactoryArgs) predicates.FitPredicate { predicate, precomputationFunction := predicates.NewServiceAffinityPredicate( - args.PodLister, - args.ServiceLister, - args.NodeInfo, - policy.Argument.ServiceAffinity.Labels, + args.SharedLister.NodeInfos(), + args.SharedLister.Pods(), + args.InformerFactory.Core().V1().Services().Lister(), + pluginArgs.ServiceAffinityArgs.AffinityLabels, ) // Once we generate the predicate we should also Register the Precomputation - predicates.RegisterPredicateMetadataProducer(policy.Name, precomputationFunction) + predicates.RegisterPredicateMetadataProducer(policyName, precomputationFunction) return predicate } } else if policy.Argument.LabelsPresence != nil { - predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate { + // We use the CheckNodeLabelPresencePred predicate name for all kNodeLabel custom predicates. + // It may get called multiple times but we essentially only register one instance of NodeLabel predicate. + // This name is then used to find the registered plugin and run the plugin instead of the predicate. + policyName = predicates.CheckNodeLabelPresencePred + + // Map LabelPresence policy to ConfigProducerArgs that's used to configure the NodeLabel plugin. + if pluginArgs.NodeLabelArgs == nil { + pluginArgs.NodeLabelArgs = &nodelabel.Args{} + } + if policy.Argument.LabelsPresence.Presence { + pluginArgs.NodeLabelArgs.PresentLabels = append(pluginArgs.NodeLabelArgs.PresentLabels, policy.Argument.LabelsPresence.Labels...) + } else { + pluginArgs.NodeLabelArgs.AbsentLabels = append(pluginArgs.NodeLabelArgs.AbsentLabels, policy.Argument.LabelsPresence.Labels...) + } + predicateFactory = func(_ AlgorithmFactoryArgs) predicates.FitPredicate { return predicates.NewNodeLabelPredicate( - policy.Argument.LabelsPresence.Labels, - policy.Argument.LabelsPresence.Presence, + pluginArgs.NodeLabelArgs.PresentLabels, + pluginArgs.NodeLabelArgs.AbsentLabels, ) } } - } else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok { + } else if predicateFactory, ok = fitPredicateMap[policyName]; ok { // checking to see if a pre-defined predicate is requested - klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name) - return policy.Name + klog.V(2).Infof("Predicate type %s already registered, reusing.", policyName) + return policyName } if predicateFactory == nil { - klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Predicate type not found for %s", policyName) } - return RegisterFitPredicateFactory(policy.Name, predicateFactory) + return RegisterFitPredicateFactory(policyName, predicateFactory) } // IsFitPredicateRegistered is useful for testing providers. @@ -307,30 +322,17 @@ func IsFitPredicateRegistered(name string) bool { } // RegisterPriorityMetadataProducerFactory registers a PriorityMetadataProducerFactory. -func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) { +func RegisterPriorityMetadataProducerFactory(f PriorityMetadataProducerFactory) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() - priorityMetadataProducer = factory + priorityMetadataProducerFactory = f } -// RegisterPredicateMetadataProducerFactory registers a PredicateMetadataProducerFactory. -func RegisterPredicateMetadataProducerFactory(factory PredicateMetadataProducerFactory) { +// RegisterPredicateMetadataProducerFactory registers a MetadataProducer. 
+func RegisterPredicateMetadataProducerFactory(f PredicateMetadataProducerFactory) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() - predicateMetadataProducer = factory -} - -// RegisterPriorityFunction registers a priority function with the algorithm registry. Returns the name, -// with which the function was registered. -// DEPRECATED -// Use Map-Reduce pattern for priority functions. -func RegisterPriorityFunction(name string, function priorities.PriorityFunction, weight int) string { - return RegisterPriorityConfigFactory(name, PriorityConfigFactory{ - Function: func(PluginFactoryArgs) priorities.PriorityFunction { - return function - }, - Weight: weight, - }) + predicateMetadataProducerFactory = f } // RegisterPriorityMapReduceFunction registers a priority function with the algorithm registry. Returns the name, @@ -341,10 +343,10 @@ func RegisterPriorityMapReduceFunction( reduceFunction priorities.PriorityReduceFunction, weight int) string { return RegisterPriorityConfigFactory(name, PriorityConfigFactory{ - MapReduceFunction: func(PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { + MapReduceFunction: func(AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { return mapFunction, reduceFunction }, - Weight: weight, + Weight: int64(weight), }) } @@ -359,66 +361,117 @@ func RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) strin // RegisterCustomPriorityFunction registers a custom priority function with the algorithm registry. // Returns the name, with which the priority function was registered. -func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { +func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy, configProducerArgs *plugins.ConfigProducerArgs) string { var pcf *PriorityConfigFactory + name := policy.Name validatePriorityOrDie(policy) // generate the priority function, if a custom priority is requested if policy.Argument != nil { if policy.Argument.ServiceAntiAffinity != nil { + // We use the ServiceAffinity plugin name for all ServiceAffinity custom priorities. + // It may get called multiple times but we essentially only register one instance of + // ServiceAffinity priority. + // This name is then used to find the registered plugin and run the plugin instead of the priority. + name = serviceaffinity.Name + + if configProducerArgs.ServiceAffinityArgs == nil { + configProducerArgs.ServiceAffinityArgs = &serviceaffinity.Args{} + } + configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference = append(configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference, policy.Argument.ServiceAntiAffinity.Label) + + weight := policy.Weight + schedulerFactoryMutex.RLock() + if existing, ok := priorityFunctionMap[name]; ok { + // If there are n ServiceAffinity priorities in the policy, the weight for the corresponding + // score plugin is n*(weight of each priority). 
+ weight += existing.Weight + } + schedulerFactoryMutex.RUnlock() pcf = &PriorityConfigFactory{ - MapReduceFunction: func(args PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { + MapReduceFunction: func(args AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { return priorities.NewServiceAntiAffinityPriority( - args.PodLister, - args.ServiceLister, - policy.Argument.ServiceAntiAffinity.Label, + args.SharedLister.Pods(), + args.InformerFactory.Core().V1().Services().Lister(), + configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference, ) }, - Weight: policy.Weight, + Weight: weight, } } else if policy.Argument.LabelPreference != nil { + // We use the NodeLabel plugin name for all NodeLabel custom priorities. + // It may get called multiple times but we essentially only register one instance of NodeLabel priority. + // This name is then used to find the registered plugin and run the plugin instead of the priority. + name = nodelabel.Name + if configProducerArgs.NodeLabelArgs == nil { + configProducerArgs.NodeLabelArgs = &nodelabel.Args{} + } + if policy.Argument.LabelPreference.Presence { + configProducerArgs.NodeLabelArgs.PresentLabelsPreference = append(configProducerArgs.NodeLabelArgs.PresentLabelsPreference, policy.Argument.LabelPreference.Label) + } else { + configProducerArgs.NodeLabelArgs.AbsentLabelsPreference = append(configProducerArgs.NodeLabelArgs.AbsentLabelsPreference, policy.Argument.LabelPreference.Label) + } + weight := policy.Weight + schedulerFactoryMutex.RLock() + if existing, ok := priorityFunctionMap[name]; ok { + // If there are n NodeLabel priority configured in the policy, the weight for the corresponding + // priority is n*(weight of each priority in policy). 
+ weight += existing.Weight + } + schedulerFactoryMutex.RUnlock() pcf = &PriorityConfigFactory{ - MapReduceFunction: func(args PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { + MapReduceFunction: func(_ AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { return priorities.NewNodeLabelPriority( - policy.Argument.LabelPreference.Label, - policy.Argument.LabelPreference.Presence, + configProducerArgs.NodeLabelArgs.PresentLabelsPreference, + configProducerArgs.NodeLabelArgs.AbsentLabelsPreference, ) }, - Weight: policy.Weight, + Weight: weight, } } else if policy.Argument.RequestedToCapacityRatioArguments != nil { + scoringFunctionShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(policy.Argument.RequestedToCapacityRatioArguments) + configProducerArgs.RequestedToCapacityRatioArgs = &requestedtocapacityratio.Args{ + FunctionShape: scoringFunctionShape, + ResourceToWeightMap: resources, + } pcf = &PriorityConfigFactory{ - MapReduceFunction: func(args PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { - scoringFunctionShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(policy.Argument.RequestedToCapacityRatioArguments) + MapReduceFunction: func(args AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { p := priorities.RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape, resources) return p.PriorityMap, nil }, Weight: policy.Weight, } + // We do not allow specifying the name for custom plugins, see #83472 + name = requestedtocapacityratio.Name } - } else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok { - klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name) + } else if existingPcf, ok := priorityFunctionMap[name]; ok { + klog.V(2).Infof("Priority type %s already registered, reusing.", name) // set/update the weight based on the policy pcf = &PriorityConfigFactory{ - Function: existingPcf.Function, MapReduceFunction: existingPcf.MapReduceFunction, Weight: policy.Weight, } } if pcf == nil { - klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Priority type not found for %s", name) } - return RegisterPriorityConfigFactory(policy.Name, *pcf) + return RegisterPriorityConfigFactory(name, *pcf) } func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) (priorities.FunctionShape, priorities.ResourceToWeightMap) { - n := len(arguments.UtilizationShape) + n := len(arguments.Shape) points := make([]priorities.FunctionShapePoint, 0, n) - for _, point := range arguments.UtilizationShape { - points = append(points, priorities.FunctionShapePoint{Utilization: int64(point.Utilization), Score: int64(point.Score)}) + for _, point := range arguments.Shape { + points = append(points, priorities.FunctionShapePoint{ + Utilization: int64(point.Utilization), + // MaxCustomPriorityScore may diverge from the max score used in the scheduler and defined by MaxNodeScore, + // therefore we need to scale the score returned by requested to capacity ratio to the score range + // used by the scheduler. 
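// For example (numbers illustrative, not asserted from this tree): if MaxCustomPriorityScore
// were 10 and MaxNodeScore were 100, a shape point configured with Score: 5 would be
// scaled to 5 * (100 / 10) = 50 before being handed to NewFunctionShape.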
+ Score: int64(point.Score) * (framework.MaxNodeScore / schedulerapi.MaxCustomPriorityScore), + }) } shape, err := priorities.NewFunctionShape(points) if err != nil { @@ -430,9 +483,9 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s return shape, resourceToWeightMap } for _, resource := range arguments.Resources { - resourceToWeightMap[resource.Name] = int64(resource.Weight) + resourceToWeightMap[v1.ResourceName(resource.Name)] = resource.Weight if resource.Weight == 0 { - resourceToWeightMap[resource.Name] = 1 + resourceToWeightMap[v1.ResourceName(resource.Name)] = 1 } } return shape, resourceToWeightMap @@ -446,8 +499,7 @@ func IsPriorityFunctionRegistered(name string) bool { return ok } -// RegisterAlgorithmProvider registers a new algorithm provider with the algorithm registry. This should -// be called from the init function in a provider plugin. +// RegisterAlgorithmProvider registers a new algorithm provider with the algorithm registry. func RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys sets.String) string { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() @@ -466,13 +518,13 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) { provider, ok := algorithmProviderMap[name] if !ok { - return nil, fmt.Errorf("plugin %q has not been registered", name) + return nil, fmt.Errorf("provider %q is not registered", name) } return &provider, nil } -func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]predicates.FitPredicate, error) { +func getFitPredicateFunctions(names sets.String, args AlgorithmFactoryArgs) (map[string]predicates.FitPredicate, error) { schedulerFactoryMutex.RLock() defer schedulerFactoryMutex.RUnlock() @@ -495,27 +547,27 @@ func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[st return fitPredicates, nil } -func getPriorityMetadataProducer(args PluginFactoryArgs) (priorities.PriorityMetadataProducer, error) { +func getPriorityMetadataProducer(args AlgorithmFactoryArgs) (priorities.MetadataProducer, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() - if priorityMetadataProducer == nil { - return priorities.EmptyPriorityMetadataProducer, nil + if priorityMetadataProducerFactory == nil { + return priorities.EmptyMetadataProducer, nil } - return priorityMetadataProducer(args), nil + return priorityMetadataProducerFactory(args), nil } -func getPredicateMetadataProducer(args PluginFactoryArgs) (predicates.PredicateMetadataProducer, error) { +func getPredicateMetadataProducer(args AlgorithmFactoryArgs) (predicates.MetadataProducer, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() - if predicateMetadataProducer == nil { - return predicates.EmptyPredicateMetadataProducer, nil + if predicateMetadataProducerFactory == nil { + return predicates.EmptyMetadataProducer, nil } - return predicateMetadataProducer(args), nil + return predicateMetadataProducerFactory(args), nil } -func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]priorities.PriorityConfig, error) { +func getPriorityFunctionConfigs(names sets.String, args AlgorithmFactoryArgs) ([]priorities.PriorityConfig, error) { schedulerFactoryMutex.RLock() defer schedulerFactoryMutex.RUnlock() @@ -525,21 +577,13 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]pr if !ok { return nil, fmt.Errorf("invalid priority name %s specified - no corresponding function found", name) } - if 
factory.Function != nil { - configs = append(configs, priorities.PriorityConfig{ - Name: name, - Function: factory.Function(args), - Weight: factory.Weight, - }) - } else { - mapFunction, reduceFunction := factory.MapReduceFunction(args) - configs = append(configs, priorities.PriorityConfig{ - Name: name, - Map: mapFunction, - Reduce: reduceFunction, - Weight: factory.Weight, - }) - } + mapFunction, reduceFunction := factory.MapReduceFunction(args) + configs = append(configs, priorities.PriorityConfig{ + Name: name, + Map: mapFunction, + Reduce: reduceFunction, + Weight: factory.Weight, + }) } if err := validateSelectedConfigs(configs); err != nil { return nil, err @@ -549,13 +593,13 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]pr // validateSelectedConfigs validates the config weights to avoid the overflow. func validateSelectedConfigs(configs []priorities.PriorityConfig) error { - var totalPriority int + var totalPriority int64 for _, config := range configs { - // Checks totalPriority against MaxTotalPriority to avoid overflow - if config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority { + // Checks totalPriority against MaxTotalScore to avoid overflow + if config.Weight*framework.MaxNodeScore > framework.MaxTotalScore-totalPriority { return fmt.Errorf("total priority of priority functions has overflown") } - totalPriority += config.Weight * schedulerapi.MaxPriority + totalPriority += config.Weight * framework.MaxNodeScore } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD index f47bf8c3537..71ad334411a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD @@ -17,12 +17,7 @@ go_test( name = "go_default_test", srcs = ["plugins_test.go"], embed = [":go_default_library"], - deps = [ - "//pkg/features:go_default_library", - "//pkg/scheduler/factory:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", - ], + deps = ["//pkg/scheduler:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD index a13259cdd43..57ddb2bb91a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD @@ -16,11 +16,10 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults", deps = [ "//pkg/features:go_default_library", + "//pkg/scheduler:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go index a37ec478e87..dd189dd18b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -23,9 +23,9 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - "k8s.io/kubernetes/pkg/scheduler/factory" ) const ( @@ -47,12 +47,9 @@ func defaultPredicates() sets.String { predicates.MatchInterPodAffinityPred, predicates.NoDiskConflictPred, predicates.GeneralPred, - predicates.CheckNodeMemoryPressurePred, - predicates.CheckNodeDiskPressurePred, - predicates.CheckNodePIDPressurePred, - predicates.CheckNodeConditionPred, predicates.PodToleratesNodeTaintsPred, predicates.CheckVolumeBindingPred, + predicates.CheckNodeUnschedulablePred, ) } @@ -61,57 +58,34 @@ func defaultPredicates() sets.String { // when this function is called, and should be called in tests which may modify the value // of a feature gate temporarily. func ApplyFeatureGates() (restore func()) { - snapshot := factory.Copy() - if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) { - // Remove "CheckNodeCondition", "CheckNodeMemoryPressure", "CheckNodePIDPressure" - // and "CheckNodeDiskPressure" predicates - factory.RemoveFitPredicate(predicates.CheckNodeConditionPred) - factory.RemoveFitPredicate(predicates.CheckNodeMemoryPressurePred) - factory.RemoveFitPredicate(predicates.CheckNodeDiskPressurePred) - factory.RemoveFitPredicate(predicates.CheckNodePIDPressurePred) - // Remove key "CheckNodeCondition", "CheckNodeMemoryPressure", "CheckNodePIDPressure" and "CheckNodeDiskPressure" - // from ALL algorithm provider - // The key will be removed from all providers which in algorithmProviderMap[] - // if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider() - factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeConditionPred) - factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeMemoryPressurePred) - factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeDiskPressurePred) - factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodePIDPressurePred) - - // Fit is determined based on whether a pod can tolerate all of the node's taints - factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) - // Fit is determined based on whether a pod can tolerate unschedulable of node - factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate) - // Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider - // The key will insert to all providers which in algorithmProviderMap[] - // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider() - factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred) - factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred) - - klog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") - } + snapshot := scheduler.RegisteredPredicatesAndPrioritiesSnapshot() // Only register EvenPodsSpread predicate 
& priority if the feature is enabled if utilfeature.DefaultFeatureGate.Enabled(features.EvenPodsSpread) { klog.Infof("Registering EvenPodsSpread predicate and priority function") // register predicate - factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.EvenPodsSpreadPred) - factory.RegisterFitPredicate(predicates.EvenPodsSpreadPred, predicates.EvenPodsSpreadPredicate) + scheduler.InsertPredicateKeyToAlgorithmProviderMap(predicates.EvenPodsSpreadPred) + scheduler.RegisterFitPredicate(predicates.EvenPodsSpreadPred, predicates.EvenPodsSpreadPredicate) // register priority - factory.InsertPriorityKeyToAlgorithmProviderMap(priorities.EvenPodsSpreadPriority) - factory.RegisterPriorityFunction(priorities.EvenPodsSpreadPriority, priorities.CalculateEvenPodsSpreadPriority, 1) + scheduler.InsertPriorityKeyToAlgorithmProviderMap(priorities.EvenPodsSpreadPriority) + scheduler.RegisterPriorityMapReduceFunction( + priorities.EvenPodsSpreadPriority, + priorities.CalculateEvenPodsSpreadPriorityMap, + priorities.CalculateEvenPodsSpreadPriorityReduce, + 1, + ) } // Prioritizes nodes that satisfy pod's resource limits if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) { klog.Infof("Registering resourcelimits priority function") - factory.RegisterPriorityMapReduceFunction(priorities.ResourceLimitsPriority, priorities.ResourceLimitsPriorityMap, nil, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.ResourceLimitsPriority, priorities.ResourceLimitsPriorityMap, nil, 1) // Register the priority function to specific provider too. - factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityMapReduceFunction(priorities.ResourceLimitsPriority, priorities.ResourceLimitsPriorityMap, nil, 1)) + scheduler.InsertPriorityKeyToAlgorithmProviderMap(scheduler.RegisterPriorityMapReduceFunction(priorities.ResourceLimitsPriority, priorities.ResourceLimitsPriorityMap, nil, 1)) } restore = func() { - factory.Apply(snapshot) + scheduler.ApplyPredicatesAndPriorities(snapshot) } return } @@ -119,9 +93,9 @@ func ApplyFeatureGates() (restore func()) { func registerAlgorithmProvider(predSet, priSet sets.String) { // Registers algorithm providers. By default we use 'DefaultProvider', but user can specify one to be used // by specifying flag. - factory.RegisterAlgorithmProvider(factory.DefaultProvider, predSet, priSet) + scheduler.RegisterAlgorithmProvider(scheduler.DefaultProvider, predSet, priSet) // Cluster autoscaler friendly scheduling algorithm. - factory.RegisterAlgorithmProvider(ClusterAutoscalerProvider, predSet, + scheduler.RegisterAlgorithmProvider(ClusterAutoscalerProvider, predSet, copyAndReplace(priSet, priorities.LeastRequestedPriority, priorities.MostRequestedPriority)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go index eb6bd09618a..a254490eab8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go @@ -17,15 +17,16 @@ limitations under the License. package defaults import ( + "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/factory" ) func init() { // Register functions that extract metadata used by predicates computations. 
- factory.RegisterPredicateMetadataProducerFactory( - func(args factory.PluginFactoryArgs) predicates.PredicateMetadataProducer { - return predicates.NewPredicateMetadataFactory(args.PodLister) + scheduler.RegisterPredicateMetadataProducerFactory( + func(args scheduler.AlgorithmFactoryArgs) predicates.MetadataProducer { + f := &predicates.MetadataProducerFactory{} + return f.GetPredicateMetadata }) // IMPORTANT NOTES for predicate developers: @@ -34,97 +35,111 @@ func init() { // PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding. // For backwards compatibility with 1.0, PodFitsPorts is registered as well. - factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts) + scheduler.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts) // Fit is defined based on the absence of port conflicts. // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate(predicates.PodFitsHostPortsPred, predicates.PodFitsHostPorts) + scheduler.RegisterFitPredicate(predicates.PodFitsHostPortsPred, predicates.PodFitsHostPorts) // Fit is determined by resource availability. // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate(predicates.PodFitsResourcesPred, predicates.PodFitsResources) + scheduler.RegisterFitPredicate(predicates.PodFitsResourcesPred, predicates.PodFitsResources) // Fit is determined by the presence of the Host parameter and a string match // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate(predicates.HostNamePred, predicates.PodFitsHost) + scheduler.RegisterFitPredicate(predicates.HostNamePred, predicates.PodFitsHost) // Fit is determined by node selector query. - factory.RegisterFitPredicate(predicates.MatchNodeSelectorPred, predicates.PodMatchNodeSelector) + scheduler.RegisterFitPredicate(predicates.MatchNodeSelectorPred, predicates.PodMatchNodeSelector) // Fit is determined by volume zone requirements. 
- factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.NoVolumeZoneConflictPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewVolumeZonePredicate(pvLister, pvcLister, storageClassLister) }, ) // Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MaxEBSVolumeCountPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.CSINodeInfo, args.StorageClassInfo, args.PVInfo, args.PVCInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + csiNodeLister := scheduler.GetCSINodeLister(args.InformerFactory) + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, csiNodeLister, storageClassLister, pvLister, pvcLister) }, ) // Fit is determined by whether or not there would be too many GCE PD volumes attached to the node - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MaxGCEPDVolumeCountPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.CSINodeInfo, args.StorageClassInfo, args.PVInfo, args.PVCInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + csiNodeLister := scheduler.GetCSINodeLister(args.InformerFactory) + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, csiNodeLister, storageClassLister, pvLister, pvcLister) }, ) // Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MaxAzureDiskVolumeCountPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.CSINodeInfo, args.StorageClassInfo, args.PVInfo, args.PVCInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + csiNodeLister := scheduler.GetCSINodeLister(args.InformerFactory) + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, csiNodeLister, storageClassLister, pvLister, pvcLister) 
}, ) - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MaxCSIVolumeCountPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewCSIMaxVolumeLimitPredicate(args.CSINodeInfo, args.PVInfo, args.PVCInfo, args.StorageClassInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + csiNodeLister := scheduler.GetCSINodeLister(args.InformerFactory) + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewCSIMaxVolumeLimitPredicate(csiNodeLister, pvLister, pvcLister, storageClassLister) }, ) - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MaxCinderVolumeCountPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, args.CSINodeInfo, args.StorageClassInfo, args.PVInfo, args.PVCInfo) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + csiNodeLister := scheduler.GetCSINodeLister(args.InformerFactory) + pvLister := args.InformerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := args.InformerFactory.Core().V1().PersistentVolumeClaims().Lister() + storageClassLister := args.InformerFactory.Storage().V1().StorageClasses().Lister() + return predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, csiNodeLister, storageClassLister, pvLister, pvcLister) }, ) // Fit is determined by inter-pod affinity. - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.MatchInterPodAffinityPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { - return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister) + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { + return predicates.NewPodAffinityPredicate(args.SharedLister.NodeInfos(), args.SharedLister.Pods()) }, ) // Fit is determined by non-conflicting disk volumes. - factory.RegisterFitPredicate(predicates.NoDiskConflictPred, predicates.NoDiskConflict) + scheduler.RegisterFitPredicate(predicates.NoDiskConflictPred, predicates.NoDiskConflict) // GeneralPredicates are the predicates that are enforced by all Kubernetes components // (e.g. kubelet and all schedulers) - factory.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates) - - // Fit is determined by node memory pressure condition. - factory.RegisterFitPredicate(predicates.CheckNodeMemoryPressurePred, predicates.CheckNodeMemoryPressurePredicate) - - // Fit is determined by node disk pressure condition. - factory.RegisterFitPredicate(predicates.CheckNodeDiskPressurePred, predicates.CheckNodeDiskPressurePredicate) - - // Fit is determined by node pid pressure condition. - factory.RegisterFitPredicate(predicates.CheckNodePIDPressurePred, predicates.CheckNodePIDPressurePredicate) - - // Fit is determined by node conditions: not ready, network unavailable or out of disk. 
- factory.RegisterMandatoryFitPredicate(predicates.CheckNodeConditionPred, predicates.CheckNodeConditionPredicate) + scheduler.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates) // Fit is determined based on whether a pod can tolerate all of the node's taints - factory.RegisterFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) + scheduler.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) + + // Fit is determined based on whether a pod can tolerate unschedulable of node + scheduler.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate) // Fit is determined by volume topology requirements. - factory.RegisterFitPredicateFactory( + scheduler.RegisterFitPredicateFactory( predicates.CheckVolumeBindingPred, - func(args factory.PluginFactoryArgs) predicates.FitPredicate { + func(args scheduler.AlgorithmFactoryArgs) predicates.FitPredicate { return predicates.NewVolumeBindingPredicate(args.VolumeBinder) }, ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_priorities.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_priorities.go index e39338668f7..8887206d041 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_priorities.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_priorities.go @@ -17,81 +17,77 @@ limitations under the License. package defaults import ( + "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - "k8s.io/kubernetes/pkg/scheduler/core" - "k8s.io/kubernetes/pkg/scheduler/factory" ) func init() { // Register functions that extract metadata used by priorities computations. - factory.RegisterPriorityMetadataProducerFactory( - func(args factory.PluginFactoryArgs) priorities.PriorityMetadataProducer { - return priorities.NewPriorityMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) + scheduler.RegisterPriorityMetadataProducerFactory( + func(args scheduler.AlgorithmFactoryArgs) priorities.MetadataProducer { + serviceLister := args.InformerFactory.Core().V1().Services().Lister() + controllerLister := args.InformerFactory.Core().V1().ReplicationControllers().Lister() + replicaSetLister := args.InformerFactory.Apps().V1().ReplicaSets().Lister() + statefulSetLister := args.InformerFactory.Apps().V1().StatefulSets().Lister() + return priorities.NewMetadataFactory(serviceLister, controllerLister, replicaSetLister, statefulSetLister, args.HardPodAffinitySymmetricWeight) }) // ServiceSpreadingPriority is a priority config factory that spreads pods by minimizing // the number of pods (belonging to the same service) on the same node. 
// Register the factory so that it's available, but do not include it as part of the default priorities // Largely replaced by "SelectorSpreadPriority", but registered for backward compatibility with 1.0 - factory.RegisterPriorityConfigFactory( + scheduler.RegisterPriorityConfigFactory( priorities.ServiceSpreadingPriority, - factory.PriorityConfigFactory{ - MapReduceFunction: func(args factory.PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { - return priorities.NewSelectorSpreadPriority(args.ServiceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{}, algorithm.EmptyStatefulSetLister{}) + scheduler.PriorityConfigFactory{ + MapReduceFunction: func(args scheduler.AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { + serviceLister := args.InformerFactory.Core().V1().Services().Lister() + return priorities.NewSelectorSpreadPriority(serviceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{}, algorithm.EmptyStatefulSetLister{}) }, Weight: 1, }, ) - // EqualPriority is a prioritizer function that gives an equal weight of one to all nodes - // Register the priority function so that its available - // but do not include it as part of the default priorities - factory.RegisterPriorityMapReduceFunction(priorities.EqualPriority, core.EqualPriorityMap, nil, 1) // Optional, cluster-autoscaler friendly priority function - give used nodes higher priority. - factory.RegisterPriorityMapReduceFunction(priorities.MostRequestedPriority, priorities.MostRequestedPriorityMap, nil, 1) - factory.RegisterPriorityMapReduceFunction( + scheduler.RegisterPriorityMapReduceFunction(priorities.MostRequestedPriority, priorities.MostRequestedPriorityMap, nil, 1) + scheduler.RegisterPriorityMapReduceFunction( priorities.RequestedToCapacityRatioPriority, priorities.RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, 1) // spreads pods by minimizing the number of pods (belonging to the same service or replication controller) on the same node. - factory.RegisterPriorityConfigFactory( + scheduler.RegisterPriorityConfigFactory( priorities.SelectorSpreadPriority, - factory.PriorityConfigFactory{ - MapReduceFunction: func(args factory.PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { - return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) + scheduler.PriorityConfigFactory{ + MapReduceFunction: func(args scheduler.AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) { + serviceLister := args.InformerFactory.Core().V1().Services().Lister() + controllerLister := args.InformerFactory.Core().V1().ReplicationControllers().Lister() + replicaSetLister := args.InformerFactory.Apps().V1().ReplicaSets().Lister() + statefulSetLister := args.InformerFactory.Apps().V1().StatefulSets().Lister() + return priorities.NewSelectorSpreadPriority(serviceLister, controllerLister, replicaSetLister, statefulSetLister) }, Weight: 1, }, ) // pods should be placed in the same topological domain (e.g. same node, same rack, same zone, same power domain, etc.) // as some other pods, or, conversely, should not be placed in the same topological domain as some other pods. 
- factory.RegisterPriorityConfigFactory( - priorities.InterPodAffinityPriority, - factory.PriorityConfigFactory{ - Function: func(args factory.PluginFactoryArgs) priorities.PriorityFunction { - return priorities.NewInterPodAffinityPriority(args.NodeInfo, args.HardPodAffinitySymmetricWeight) - }, - Weight: 1, - }, - ) + scheduler.RegisterPriorityMapReduceFunction(priorities.InterPodAffinityPriority, priorities.CalculateInterPodAffinityPriorityMap, priorities.CalculateInterPodAffinityPriorityReduce, 1) // Prioritize nodes by least requested utilization. - factory.RegisterPriorityMapReduceFunction(priorities.LeastRequestedPriority, priorities.LeastRequestedPriorityMap, nil, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.LeastRequestedPriority, priorities.LeastRequestedPriorityMap, nil, 1) // Prioritizes nodes to help achieve balanced resource usage - factory.RegisterPriorityMapReduceFunction(priorities.BalancedResourceAllocation, priorities.BalancedResourceAllocationMap, nil, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.BalancedResourceAllocation, priorities.BalancedResourceAllocationMap, nil, 1) // Set this weight large enough to override all other priority functions. // TODO: Figure out a better way to do this, maybe at same time as fixing #24720. - factory.RegisterPriorityMapReduceFunction(priorities.NodePreferAvoidPodsPriority, priorities.CalculateNodePreferAvoidPodsPriorityMap, nil, 10000) + scheduler.RegisterPriorityMapReduceFunction(priorities.NodePreferAvoidPodsPriority, priorities.CalculateNodePreferAvoidPodsPriorityMap, nil, 10000) // Prioritizes nodes that have labels matching NodeAffinity - factory.RegisterPriorityMapReduceFunction(priorities.NodeAffinityPriority, priorities.CalculateNodeAffinityPriorityMap, priorities.CalculateNodeAffinityPriorityReduce, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.NodeAffinityPriority, priorities.CalculateNodeAffinityPriorityMap, priorities.CalculateNodeAffinityPriorityReduce, 1) // Prioritizes nodes that marked with taint which pod can tolerate. - factory.RegisterPriorityMapReduceFunction(priorities.TaintTolerationPriority, priorities.ComputeTaintTolerationPriorityMap, priorities.ComputeTaintTolerationPriorityReduce, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.TaintTolerationPriority, priorities.ComputeTaintTolerationPriorityMap, priorities.ComputeTaintTolerationPriorityReduce, 1) // ImageLocalityPriority prioritizes nodes that have images requested by the pod present. 
- factory.RegisterPriorityMapReduceFunction(priorities.ImageLocalityPriority, priorities.ImageLocalityPriorityMap, nil, 1) + scheduler.RegisterPriorityMapReduceFunction(priorities.ImageLocalityPriority, priorities.ImageLocalityPriorityMap, nil, 1) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD index da8f819eea2..d322950a866 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD @@ -7,22 +7,8 @@ load( go_library( name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "well_known_labels.go", - "zz_generated.deepcopy.go", - ], + srcs = ["well_known_labels.go"], importpath = "k8s.io/kubernetes/pkg/scheduler/api", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - ], ) filegroup( @@ -34,12 +20,6 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/scheduler/api/compatibility:all-srcs", - "//pkg/scheduler/api/latest:all-srcs", - "//pkg/scheduler/api/v1:all-srcs", - "//pkg/scheduler/api/validation:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/BUILD deleted file mode 100644 index 65b4db771a0..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["latest.go"], - importpath = "k8s.io/kubernetes/pkg/scheduler/api/latest", - deps = [ - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/api/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/yaml:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/latest.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/latest.go deleted file mode 100644 index f4a4ff7adcf..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/latest.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package latest - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - // Init the api v1 package - _ "k8s.io/kubernetes/pkg/scheduler/api/v1" -) - -// Version is the string that represents the current external default version. -const Version = "v1" - -// OldestVersion is the string that represents the oldest server version supported. -const OldestVersion = "v1" - -// Versions is the list of versions that are recognized in code. The order provided -// may be assumed to be least feature rich to most feature rich, and clients may -// choose to prefer the latter items in the list over the former items when presented -// with a set of versions to choose. -var Versions = []string{"v1"} - -// Codec is the default codec for serializing input that should use -// the latest supported version. It supports JSON by default. -var Codec runtime.Codec - -func init() { - jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, schedulerapi.Scheme, schedulerapi.Scheme, true) - serializer := yaml.NewDecodingSerializer(jsonSerializer) - Codec = versioning.NewDefaultingCodecForScheme( - schedulerapi.Scheme, - serializer, - serializer, - schema.GroupVersion{Version: Version}, - runtime.InternalGroupVersioner, - ) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/zz_generated.deepcopy.go deleted file mode 100644 index 6b24fdc30d1..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,690 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) { - *out = *in - if in.Pod != nil { - in, out := &in.Pod, &out.Pod - *out = new(corev1.Pod) - (*in).DeepCopyInto(*out) - } - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = new(corev1.NodeList) - (*in).DeepCopyInto(*out) - } - if in.NodeNames != nil { - in, out := &in.NodeNames, &out.NodeNames - *out = new([]string) - if **in != nil { - in, out := *in, *out - *out = make([]string, len(*in)) - copy(*out, *in) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs. -func (in *ExtenderArgs) DeepCopy() *ExtenderArgs { - if in == nil { - return nil - } - out := new(ExtenderArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs. -func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs { - if in == nil { - return nil - } - out := new(ExtenderBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult. -func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult { - if in == nil { - return nil - } - out := new(ExtenderBindingResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig. -func (in *ExtenderConfig) DeepCopy() *ExtenderConfig { - if in == nil { - return nil - } - out := new(ExtenderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = new(corev1.NodeList) - (*in).DeepCopyInto(*out) - } - if in.NodeNames != nil { - in, out := &in.NodeNames, &out.NodeNames - *out = new([]string) - if **in != nil { - in, out := *in, *out - *out = make([]string, len(*in)) - copy(*out, *in) - } - } - if in.FailedNodes != nil { - in, out := &in.FailedNodes, &out.FailedNodes - *out = make(FailedNodesMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult. 
-func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult { - if in == nil { - return nil - } - out := new(ExtenderFilterResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) { - *out = *in - if in.Pod != nil { - in, out := &in.Pod, &out.Pod - *out = new(corev1.Pod) - (*in).DeepCopyInto(*out) - } - if in.NodeNameToVictims != nil { - in, out := &in.NodeNameToVictims, &out.NodeNameToVictims - *out = make(map[string]*Victims, len(*in)) - for key, val := range *in { - var outVal *Victims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(Victims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - if in.NodeNameToMetaVictims != nil { - in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims - *out = make(map[string]*MetaVictims, len(*in)) - for key, val := range *in { - var outVal *MetaVictims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(MetaVictims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionArgs. -func (in *ExtenderPreemptionArgs) DeepCopy() *ExtenderPreemptionArgs { - if in == nil { - return nil - } - out := new(ExtenderPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult) { - *out = *in - if in.NodeNameToMetaVictims != nil { - in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims - *out = make(map[string]*MetaVictims, len(*in)) - for key, val := range *in { - var outVal *MetaVictims - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(MetaVictims) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionResult. -func (in *ExtenderPreemptionResult) DeepCopy() *ExtenderPreemptionResult { - if in == nil { - return nil - } - out := new(ExtenderPreemptionResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) { - { - in := &in - *out = make(FailedNodesMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap. -func (in FailedNodesMap) DeepCopy() FailedNodesMap { - if in == nil { - return nil - } - out := new(FailedNodesMap) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPriority) DeepCopyInto(out *HostPriority) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority. -func (in *HostPriority) DeepCopy() *HostPriority { - if in == nil { - return nil - } - out := new(HostPriority) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) { - { - in := &in - *out = make(HostPriorityList, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList. -func (in HostPriorityList) DeepCopy() HostPriorityList { - if in == nil { - return nil - } - out := new(HostPriorityList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabelPreference) DeepCopyInto(out *LabelPreference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference. -func (in *LabelPreference) DeepCopy() *LabelPreference { - if in == nil { - return nil - } - out := new(LabelPreference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence. -func (in *LabelsPresence) DeepCopy() *LabelsPresence { - if in == nil { - return nil - } - out := new(LabelsPresence) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MetaPod) DeepCopyInto(out *MetaPod) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaPod. -func (in *MetaPod) DeepCopy() *MetaPod { - if in == nil { - return nil - } - out := new(MetaPod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetaVictims) DeepCopyInto(out *MetaVictims) { - *out = *in - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = make([]*MetaPod, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(MetaPod) - **out = **in - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaVictims. -func (in *MetaVictims) DeepCopy() *MetaVictims { - if in == nil { - return nil - } - out := new(MetaVictims) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Predicates != nil { - in, out := &in.Predicates, &out.Predicates - *out = make([]PredicatePolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Priorities != nil { - in, out := &in.Priorities, &out.Priorities - *out = make([]PriorityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ExtenderConfigs != nil { - in, out := &in.ExtenderConfigs, &out.ExtenderConfigs - *out = make([]ExtenderConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. -func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Policy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) { - *out = *in - if in.ServiceAffinity != nil { - in, out := &in.ServiceAffinity, &out.ServiceAffinity - *out = new(ServiceAffinity) - (*in).DeepCopyInto(*out) - } - if in.LabelsPresence != nil { - in, out := &in.LabelsPresence, &out.LabelsPresence - *out = new(LabelsPresence) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument. -func (in *PredicateArgument) DeepCopy() *PredicateArgument { - if in == nil { - return nil - } - out := new(PredicateArgument) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) { - *out = *in - if in.Argument != nil { - in, out := &in.Argument, &out.Argument - *out = new(PredicateArgument) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy. 
-func (in *PredicatePolicy) DeepCopy() *PredicatePolicy { - if in == nil { - return nil - } - out := new(PredicatePolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) { - *out = *in - if in.ServiceAntiAffinity != nil { - in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity - *out = new(ServiceAntiAffinity) - **out = **in - } - if in.LabelPreference != nil { - in, out := &in.LabelPreference, &out.LabelPreference - *out = new(LabelPreference) - **out = **in - } - if in.RequestedToCapacityRatioArguments != nil { - in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments - *out = new(RequestedToCapacityRatioArguments) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument. -func (in *PriorityArgument) DeepCopy() *PriorityArgument { - if in == nil { - return nil - } - out := new(PriorityArgument) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) { - *out = *in - if in.Argument != nil { - in, out := &in.Argument, &out.Argument - *out = new(PriorityArgument) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy. -func (in *PriorityPolicy) DeepCopy() *PriorityPolicy { - if in == nil { - return nil - } - out := new(PriorityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapacityRatioArguments) { - *out = *in - if in.UtilizationShape != nil { - in, out := &in.UtilizationShape, &out.UtilizationShape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArguments. -func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRatioArguments { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioArguments) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity. 
-func (in *ServiceAffinity) DeepCopy() *ServiceAffinity { - if in == nil { - return nil - } - out := new(ServiceAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity. -func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity { - if in == nil { - return nil - } - out := new(ServiceAntiAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Victims) DeepCopyInto(out *Victims) { - *out = *in - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = make([]*corev1.Pod, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(corev1.Pod) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Victims. -func (in *Victims) DeepCopy() *Victims { - if in == nil { - return nil - } - out := new(Victims) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/validation.go deleted file mode 100644 index 5e9e53bec6f..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/validation.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "errors" - "fmt" - - "k8s.io/api/core/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" -) - -// ValidatePolicy checks for errors in the Config -// It does not return early so that it can find as many errors as possible -func ValidatePolicy(policy schedulerapi.Policy) error { - var validationErrors []error - - for _, priority := range policy.Priorities { - if priority.Weight <= 0 || priority.Weight >= schedulerapi.MaxWeight { - validationErrors = append(validationErrors, fmt.Errorf("Priority %s should have a positive weight applied to it or it has overflown", priority.Name)) - } - } - - binders := 0 - extenderManagedResources := sets.NewString() - for _, extender := range policy.ExtenderConfigs { - if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 { - validationErrors = append(validationErrors, fmt.Errorf("Priority for extender %s should have a positive weight applied to it", extender.URLPrefix)) - } - if extender.BindVerb != "" { - binders++ - } - for _, resource := range extender.ManagedResources { - errs := validateExtendedResourceName(resource.Name) - if len(errs) != 0 { - validationErrors = append(validationErrors, errs...) - } - if extenderManagedResources.Has(string(resource.Name)) { - validationErrors = append(validationErrors, fmt.Errorf("Duplicate extender managed resource name %s", string(resource.Name))) - } - extenderManagedResources.Insert(string(resource.Name)) - } - } - if binders > 1 { - validationErrors = append(validationErrors, fmt.Errorf("Only one extender can implement bind, found %v", binders)) - } - return utilerrors.NewAggregate(validationErrors) -} - -// validateExtendedResourceName checks whether the specified name is a valid -// extended resource name. -func validateExtendedResourceName(name v1.ResourceName) []error { - var validationErrors []error - for _, msg := range validation.IsQualifiedName(string(name)) { - validationErrors = append(validationErrors, errors.New(msg)) - } - if len(validationErrors) != 0 { - return validationErrors - } - if !v1helper.IsExtendedResourceName(name) { - validationErrors = append(validationErrors, fmt.Errorf("%s is an invalid extended resource name", name)) - } - return validationErrors -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go index e79722e1d82..765bf2260b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go @@ -16,47 +16,7 @@ limitations under the License. package api -import ( - api "k8s.io/kubernetes/pkg/apis/core" -) - const ( - // TaintNodeNotReady will be added when node is not ready - // and feature-gate for TaintBasedEvictions flag is enabled, - // and removed when node becomes ready. - TaintNodeNotReady = "node.kubernetes.io/not-ready" - - // TaintNodeUnreachable will be added when node becomes unreachable - // (corresponding to NodeReady status ConditionUnknown) - // and feature-gate for TaintBasedEvictions flag is enabled, - // and removed when node becomes reachable (NodeReady status ConditionTrue). 
- TaintNodeUnreachable = "node.kubernetes.io/unreachable" - - // TaintNodeUnschedulable will be added when node becomes unschedulable - // and feature-gate for TaintNodesByCondition flag is enabled, - // and removed when node becomes scheduable. - TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" - - // TaintNodeMemoryPressure will be added when node has memory pressure - // and feature-gate for TaintNodesByCondition flag is enabled, - // and removed when node has enough memory. - TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" - - // TaintNodeDiskPressure will be added when node has disk pressure - // and feature-gate for TaintNodesByCondition flag is enabled, - // and removed when node has enough disk. - TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" - - // TaintNodeNetworkUnavailable will be added when node's network is unavailable - // and feature-gate for TaintNodesByCondition flag is enabled, - // and removed when network becomes ready. - TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" - - // TaintNodePIDPressure will be added when node has pid pressure - // and feature-gate for TaintNodesByCondition flag is enabled, - // and removed when node has enough disk. - TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" - // TaintExternalCloudProvider sets this taint on a node to mark it as unusable, // when kubelet is started with the "external" cloud provider, until a controller // from the cloud-controller-manager intitializes this node, and then removes @@ -65,8 +25,4 @@ const ( // TaintNodeShutdown when node is shutdown in external cloud provider TaintNodeShutdown = "node.cloudprovider.kubernetes.io/shutdown" - - // NodeFieldSelectorKeyNodeName ('metadata.name') uses this as node field selector key - // when selecting node by node's name. - NodeFieldSelectorKeyNodeName = api.ObjectNameField ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD index 7abec4f1794..ffb3173e051 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "legacy_types.go", "register.go", "types.go", "zz_generated.deepcopy.go", @@ -30,6 +31,8 @@ filegroup( srcs = [ ":package-srcs", "//pkg/scheduler/apis/config/scheme:all-srcs", + "//pkg/scheduler/apis/config/testing:all-srcs", + "//pkg/scheduler/apis/config/v1:all-srcs", "//pkg/scheduler/apis/config/v1alpha1:all-srcs", "//pkg/scheduler/apis/config/validation:all-srcs", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/legacy_types.go similarity index 68% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/legacy_types.go index eb76ea39839..4e5ffa1ee21 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/legacy_types.go @@ -14,30 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package api +package config import ( "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // MaxUint defines the max unsigned int value. - MaxUint = ^uint(0) - // MaxInt defines the max signed int value. - MaxInt = int(MaxUint >> 1) - // MaxTotalPriority defines the max total priority value. - MaxTotalPriority = MaxInt - // MaxPriority defines the max priority value. - MaxPriority = 10 - // MaxWeight defines the max weight value. - MaxWeight = MaxInt / MaxPriority - // DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes - // that once found feasible, the scheduler stops looking for more nodes. - DefaultPercentageOfNodesToScore = 50 ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -55,7 +37,7 @@ type Policy struct { // If empty list, all priority functions will be bypassed. Priorities []PriorityPolicy // Holds the information to communicate with the extender(s) - ExtenderConfigs []ExtenderConfig + Extenders []Extender // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. @@ -86,7 +68,7 @@ type PriorityPolicy struct { Name string // The numeric multiplier for the node scores that the priority function generates // The weight should be a positive integer - Weight int + Weight int64 // Holds the parameters to configure the given priority function Argument *PriorityArgument } @@ -150,31 +132,31 @@ type LabelPreference struct { // RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. type RequestedToCapacityRatioArguments struct { // Array of point defining priority function shape - UtilizationShape []UtilizationShapePoint - Resources []ResourceSpec + Shape []UtilizationShapePoint + Resources []ResourceSpec } // UtilizationShapePoint represents single point of priority function shape type UtilizationShapePoint struct { // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int + Utilization int32 // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int + Score int32 } // ResourceSpec represents single resource for bin packing of priority RequestedToCapacityRatioArguments. type ResourceSpec struct { // Name of the resource to be managed by RequestedToCapacityRatio function. - Name v1.ResourceName + Name string // Weight of the resource. - Weight int + Weight int64 } // ExtenderManagedResource describes the arguments of extended resources // managed by an extender. type ExtenderManagedResource struct { // Name is the extended resource name. - Name v1.ResourceName + Name string // IgnoredByScheduler indicates whether kube-scheduler should ignore this // resource when applying predicates. IgnoredByScheduler bool @@ -207,9 +189,9 @@ type ExtenderTLSConfig struct { CAData []byte } -// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, // it is assumed that the extender chose not to provide that extension. 
-type ExtenderConfig struct { +type Extender struct { // URLPrefix at which the extender is available URLPrefix string // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. @@ -220,7 +202,7 @@ type ExtenderConfig struct { PrioritizeVerb string // The numeric multiplier for the node scores that the prioritize call generates. // The weight should be a positive integer - Weight int + Weight int64 // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender // can implement this function. @@ -250,114 +232,3 @@ type ExtenderConfig struct { // fail when the extender returns an error or is not reachable. Ignorable bool } - -// ExtenderPreemptionResult represents the result returned by preemption phase of extender. -type ExtenderPreemptionResult struct { - NodeNameToMetaVictims map[string]*MetaVictims -} - -// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes. -type ExtenderPreemptionArgs struct { - // Pod being scheduled - Pod *v1.Pod - // Victims map generated by scheduler preemption phase - // Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims. - NodeNameToVictims map[string]*Victims - NodeNameToMetaVictims map[string]*MetaVictims -} - -// Victims represents: -// pods: a group of pods expected to be preempted. -// numPDBViolations: the count of violations of PodDisruptionBudget -type Victims struct { - Pods []*v1.Pod - NumPDBViolations int -} - -// MetaPod represent identifier for a v1.Pod -type MetaPod struct { - UID string -} - -// MetaVictims represents: -// pods: a group of pods expected to be preempted. -// Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way. -// numPDBViolations: the count of violations of PodDisruptionBudget -type MetaVictims struct { - Pods []*MetaPod - NumPDBViolations int -} - -// ExtenderArgs represents the arguments needed by the extender to filter/prioritize -// nodes for a pod. -type ExtenderArgs struct { - // Pod being scheduled - Pod *v1.Pod - // List of candidate nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == false - Nodes *v1.NodeList - // List of candidate node names where the pod can be scheduled; to be - // populated only if ExtenderConfig.NodeCacheCapable == true - NodeNames *[]string -} - -// FailedNodesMap represents the filtered out nodes, with node names and failure messages -type FailedNodesMap map[string]string - -// ExtenderFilterResult represents the results of a filter call to an extender -type ExtenderFilterResult struct { - // Filtered set of nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == false - Nodes *v1.NodeList - // Filtered set of nodes where the pod can be scheduled; to be populated - // only if ExtenderConfig.NodeCacheCapable == true - NodeNames *[]string - // Filtered out nodes where the pod can't be scheduled and the failure messages - FailedNodes FailedNodesMap - // Error message indicating failure - Error string -} - -// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node. 
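For orientation, the edits in this hunk rename the internal ExtenderConfig type to Extender and widen Weight to int64. A minimal, hypothetical sketch of the renamed type in use (endpoint and values are illustrative only, not taken from this patch):

    package main

    import (
        "fmt"

        config "k8s.io/kubernetes/pkg/scheduler/apis/config"
    )

    func main() {
        // Build a legacy scheduler Policy that talks to one HTTP extender.
        // The slice field is now named Extenders and Weight is an int64.
        policy := config.Policy{
            Extenders: []config.Extender{{
                URLPrefix:      "http://127.0.0.1:8888/scheduler", // hypothetical endpoint
                FilterVerb:     "filter",
                PrioritizeVerb: "prioritize",
                Weight:         1,
            }},
        }
        fmt.Println(len(policy.Extenders), policy.Extenders[0].Weight)
    }
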
-type ExtenderBindingArgs struct { - // PodName is the name of the pod being bound - PodName string - // PodNamespace is the namespace of the pod being bound - PodNamespace string - // PodUID is the UID of the pod being bound - PodUID types.UID - // Node selected by the scheduler - Node string -} - -// ExtenderBindingResult represents the result of binding of a pod to a node from an extender. -type ExtenderBindingResult struct { - // Error message indicating failure - Error string -} - -// HostPriority represents the priority of scheduling to a particular host, higher priority is better. -type HostPriority struct { - // Name of the host - Host string - // Score associated with the host - Score int -} - -// HostPriorityList declares a []HostPriority type. -type HostPriorityList []HostPriority - -func (h HostPriorityList) Len() int { - return len(h) -} - -func (h HostPriorityList) Less(i, j int) bool { - if h[i].Score == h[j].Score { - return h[i].Host < h[j].Host - } - return h[i].Score < h[j].Score -} - -func (h HostPriorityList) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go index bb2c6bad89b..bb67672faef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go @@ -38,6 +38,8 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &KubeSchedulerConfiguration{}, + &Policy{}, ) + scheme.AddKnownTypes(schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}, &Policy{}) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/BUILD new file mode 100644 index 00000000000..be8e71d9658 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["scheme.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/apis/config/v1:go_default_library", + "//pkg/scheduler/apis/config/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go new file mode 100644 index 00000000000..34f74a7f2c8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" + kubeschedulerconfigv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" + kubeschedulerconfigv1alpha1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1" +) + +var ( + // Scheme is the runtime.Scheme to which all kubescheduler api types are registered. + Scheme = runtime.NewScheme() + + // Codecs provides access to encoding and decoding for the scheme. + Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict) +) + +func init() { + AddToScheme(Scheme) +} + +// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api. +func AddToScheme(scheme *runtime.Scheme) { + utilruntime.Must(kubeschedulerconfig.AddToScheme(Scheme)) + utilruntime.Must(kubeschedulerconfigv1.AddToScheme(Scheme)) + utilruntime.Must(kubeschedulerconfigv1alpha1.AddToScheme(Scheme)) + utilruntime.Must(scheme.SetVersionPriority(kubeschedulerconfigv1alpha1.SchemeGroupVersion)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go index 30b51f2cfcb..9b84ccf356c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go @@ -17,6 +17,8 @@ limitations under the License. package config import ( + "math" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" componentbaseconfig "k8s.io/component-base/config" @@ -50,7 +52,7 @@ type KubeSchedulerConfiguration struct { AlgorithmSource SchedulerAlgorithmSource // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. - // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range [0-100]. HardPodAffinitySymmetricWeight int32 // LeaderElection defines the configuration of leader election client. @@ -86,7 +88,17 @@ type KubeSchedulerConfiguration struct { // Duration to wait for a binding operation to complete before timing out // Value must be non-negative integer. The value zero indicates no waiting. // If this value is nil, the default value will be used. - BindTimeoutSeconds *int64 + BindTimeoutSeconds int64 + + // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. + // If specified, it must be greater than 0. If this value is null, the default value (1s) + // will be used. + PodInitialBackoffSeconds int64 + + // PodMaxBackoffSeconds is the max backoff for unschedulable pods. 
+ // If specified, it must be greater than or equal to podInitialBackoffSeconds. If this value is null, + // the default value (10s) will be used. + PodMaxBackoffSeconds int64 // Plugins specify the set of plugins that should be enabled or disabled. Enabled plugins are the // ones that should be enabled in addition to the default plugins. Disabled plugins are any of the @@ -130,7 +142,7 @@ type SchedulerPolicyFileSource struct { type SchedulerPolicyConfigMapSource struct { // Namespace is the namespace of the policy config map. Namespace string - // Name is the name of hte policy config map. + // Name is the name of the policy config map. Name string } @@ -209,3 +221,52 @@ type PluginConfig struct { // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. Args runtime.Unknown } + +/* + * NOTE: The following variables and methods are intentionally left out of the staging mirror. + */ +const ( + // DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes + // that once found feasible, the scheduler stops looking for more nodes. + // A value of 0 means adaptive, meaning the scheduler figures out a proper default. + DefaultPercentageOfNodesToScore = 0 + + // MaxCustomPriorityScore is the max score UtilizationShapePoint expects. + MaxCustomPriorityScore int64 = 10 + + // MaxTotalScore is the maximum total score. + MaxTotalScore int64 = math.MaxInt64 + + // MaxWeight defines the max weight value allowed for custom PriorityPolicy + MaxWeight = MaxTotalScore / MaxCustomPriorityScore +) + +func appendPluginSet(dst *PluginSet, src *PluginSet) *PluginSet { + if dst == nil { + dst = &PluginSet{} + } + if src != nil { + dst.Enabled = append(dst.Enabled, src.Enabled...) + dst.Disabled = append(dst.Disabled, src.Disabled...) + } + return dst +} + +// Append appends src Plugins to current Plugins. If a PluginSet is nil, it will +// be created. 
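The appendPluginSet helper above and the Plugins.Append method that follows merge plugin sets field by field, creating nil sets as needed. A minimal usage sketch, assuming the config.Plugins, config.PluginSet, and config.Plugin types from this package (plugin names are hypothetical):

    package main

    import (
        "fmt"

        config "k8s.io/kubernetes/pkg/scheduler/apis/config"
    )

    func main() {
        defaults := &config.Plugins{
            Filter: &config.PluginSet{
                Enabled: []config.Plugin{{Name: "NodeResources"}}, // hypothetical plugin name
            },
        }
        custom := &config.Plugins{
            Filter: &config.PluginSet{
                Enabled:  []config.Plugin{{Name: "MyCustomFilter"}}, // hypothetical plugin name
                Disabled: []config.Plugin{{Name: "NodeResources"}},
            },
        }

        // Append merges src into dst; existing entries are kept and src entries appended.
        defaults.Append(custom)

        fmt.Println(len(defaults.Filter.Enabled), len(defaults.Filter.Disabled)) // 2 1
    }

Appending rather than replacing keeps the default plugins active unless they are explicitly listed under Disabled.
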
+func (p *Plugins) Append(src *Plugins) { + if p == nil || src == nil { + return + } + p.QueueSort = appendPluginSet(p.QueueSort, src.QueueSort) + p.PreFilter = appendPluginSet(p.PreFilter, src.PreFilter) + p.Filter = appendPluginSet(p.Filter, src.Filter) + p.PostFilter = appendPluginSet(p.PostFilter, src.PostFilter) + p.Score = appendPluginSet(p.Score, src.Score) + p.Reserve = appendPluginSet(p.Reserve, src.Reserve) + p.Permit = appendPluginSet(p.Permit, src.Permit) + p.PreBind = appendPluginSet(p.PreBind, src.PreBind) + p.Bind = appendPluginSet(p.Bind, src.Bind) + p.PostBind = appendPluginSet(p.PostBind, src.PostBind) + p.Unreserve = appendPluginSet(p.Unreserve, src.Unreserve) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/BUILD new file mode 100644 index 00000000000..9a0724a1327 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/v1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/apis/config:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/kube-scheduler/config/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go new file mode 100644 index 00000000000..21bfc073adb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config +// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../../vendor/k8s.io/kube-scheduler/config/v1 +// +groupName=kubescheduler.config.k8s.io + +package v1 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go new file mode 100644 index 00000000000..d5deb35604b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. + localSchemeBuilder = &kubeschedulerconfigv1.SchemeBuilder + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(RegisterDefaults) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..c918b819fe9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go @@ -0,0 +1,559 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
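The generated file that follows registers conversions between the versioned types in k8s.io/kube-scheduler/config/v1 and the internal config types. A minimal sketch of invoking one of these conversions directly; in normal operation the scheme calls them during decoding, and the values below are hypothetical:

    package main

    import (
        "fmt"

        config "k8s.io/kubernetes/pkg/scheduler/apis/config"
        configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
        schedv1 "k8s.io/kube-scheduler/config/v1"
    )

    func main() {
        in := &schedv1.Policy{
            Extenders: []schedv1.Extender{{
                URLPrefix:  "http://127.0.0.1:8888/scheduler", // hypothetical endpoint
                FilterVerb: "filter",
                Weight:     1,
            }},
        }
        out := &config.Policy{}
        // A nil conversion.Scope is fine here: this particular conversion never consults it.
        if err := configv1.Convert_v1_Policy_To_config_Policy(in, out, nil); err != nil {
            panic(err)
        }
        fmt.Println(len(out.Extenders), out.Extenders[0].Weight) // 1 1
    }
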
+ +package v1 + +import ( + time "time" + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/kube-scheduler/config/v1" + config "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Extender_To_config_Extender(a.(*v1.Extender), b.(*config.Extender), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*v1.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Extender_To_v1_Extender(a.(*config.Extender), b.(*v1.Extender), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*v1.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*v1.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*v1.ExtenderManagedResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*v1.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*v1.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*v1.ExtenderTLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.LabelPreference)(nil), (*config.LabelPreference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LabelPreference_To_config_LabelPreference(a.(*v1.LabelPreference), b.(*config.LabelPreference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.LabelPreference)(nil), (*v1.LabelPreference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_LabelPreference_To_v1_LabelPreference(a.(*config.LabelPreference), b.(*v1.LabelPreference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.LabelsPresence)(nil), (*config.LabelsPresence)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LabelsPresence_To_config_LabelsPresence(a.(*v1.LabelsPresence), b.(*config.LabelsPresence), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.LabelsPresence)(nil), (*v1.LabelsPresence)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_config_LabelsPresence_To_v1_LabelsPresence(a.(*config.LabelsPresence), b.(*v1.LabelsPresence), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Policy)(nil), (*config.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Policy_To_config_Policy(a.(*v1.Policy), b.(*config.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Policy)(nil), (*v1.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Policy_To_v1_Policy(a.(*config.Policy), b.(*v1.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PredicateArgument)(nil), (*config.PredicateArgument)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PredicateArgument_To_config_PredicateArgument(a.(*v1.PredicateArgument), b.(*config.PredicateArgument), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PredicateArgument)(nil), (*v1.PredicateArgument)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PredicateArgument_To_v1_PredicateArgument(a.(*config.PredicateArgument), b.(*v1.PredicateArgument), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PredicatePolicy)(nil), (*config.PredicatePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PredicatePolicy_To_config_PredicatePolicy(a.(*v1.PredicatePolicy), b.(*config.PredicatePolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PredicatePolicy)(nil), (*v1.PredicatePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PredicatePolicy_To_v1_PredicatePolicy(a.(*config.PredicatePolicy), b.(*v1.PredicatePolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PriorityArgument)(nil), (*config.PriorityArgument)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PriorityArgument_To_config_PriorityArgument(a.(*v1.PriorityArgument), b.(*config.PriorityArgument), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PriorityArgument)(nil), (*v1.PriorityArgument)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PriorityArgument_To_v1_PriorityArgument(a.(*config.PriorityArgument), b.(*v1.PriorityArgument), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PriorityPolicy)(nil), (*config.PriorityPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PriorityPolicy_To_config_PriorityPolicy(a.(*v1.PriorityPolicy), b.(*config.PriorityPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PriorityPolicy)(nil), (*v1.PriorityPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PriorityPolicy_To_v1_PriorityPolicy(a.(*config.PriorityPolicy), b.(*v1.PriorityPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.RequestedToCapacityRatioArguments)(nil), (*config.RequestedToCapacityRatioArguments)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RequestedToCapacityRatioArguments_To_config_RequestedToCapacityRatioArguments(a.(*v1.RequestedToCapacityRatioArguments), 
b.(*config.RequestedToCapacityRatioArguments), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioArguments)(nil), (*v1.RequestedToCapacityRatioArguments)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_RequestedToCapacityRatioArguments_To_v1_RequestedToCapacityRatioArguments(a.(*config.RequestedToCapacityRatioArguments), b.(*v1.RequestedToCapacityRatioArguments), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceSpec_To_config_ResourceSpec(a.(*v1.ResourceSpec), b.(*config.ResourceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*v1.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ResourceSpec_To_v1_ResourceSpec(a.(*config.ResourceSpec), b.(*v1.ResourceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ServiceAffinity)(nil), (*config.ServiceAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAffinity_To_config_ServiceAffinity(a.(*v1.ServiceAffinity), b.(*config.ServiceAffinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ServiceAffinity)(nil), (*v1.ServiceAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ServiceAffinity_To_v1_ServiceAffinity(a.(*config.ServiceAffinity), b.(*v1.ServiceAffinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ServiceAntiAffinity)(nil), (*config.ServiceAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(a.(*v1.ServiceAntiAffinity), b.(*config.ServiceAntiAffinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ServiceAntiAffinity)(nil), (*v1.ServiceAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(a.(*config.ServiceAntiAffinity), b.(*v1.ServiceAntiAffinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*v1.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*v1.UtilizationShapePoint), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Extender_To_config_Extender(in *v1.Extender, out *config.Extender, s conversion.Scope) error { + out.URLPrefix = in.URLPrefix + out.FilterVerb = in.FilterVerb + out.PreemptVerb = in.PreemptVerb + out.PrioritizeVerb = in.PrioritizeVerb + out.Weight = in.Weight + out.BindVerb = in.BindVerb + out.EnableHTTPS = in.EnableHTTPS + out.TLSConfig = 
(*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) + out.HTTPTimeout = time.Duration(in.HTTPTimeout) + out.NodeCacheCapable = in.NodeCacheCapable + out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) + out.Ignorable = in.Ignorable + return nil +} + +// Convert_v1_Extender_To_config_Extender is an autogenerated conversion function. +func Convert_v1_Extender_To_config_Extender(in *v1.Extender, out *config.Extender, s conversion.Scope) error { + return autoConvert_v1_Extender_To_config_Extender(in, out, s) +} + +func autoConvert_config_Extender_To_v1_Extender(in *config.Extender, out *v1.Extender, s conversion.Scope) error { + out.URLPrefix = in.URLPrefix + out.FilterVerb = in.FilterVerb + out.PreemptVerb = in.PreemptVerb + out.PrioritizeVerb = in.PrioritizeVerb + out.Weight = in.Weight + out.BindVerb = in.BindVerb + out.EnableHTTPS = in.EnableHTTPS + out.TLSConfig = (*v1.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) + out.HTTPTimeout = time.Duration(in.HTTPTimeout) + out.NodeCacheCapable = in.NodeCacheCapable + out.ManagedResources = *(*[]v1.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) + out.Ignorable = in.Ignorable + return nil +} + +// Convert_config_Extender_To_v1_Extender is an autogenerated conversion function. +func Convert_config_Extender_To_v1_Extender(in *config.Extender, out *v1.Extender, s conversion.Scope) error { + return autoConvert_config_Extender_To_v1_Extender(in, out, s) +} + +func autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { + out.Name = in.Name + out.IgnoredByScheduler = in.IgnoredByScheduler + return nil +} + +// Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function. +func Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { + return autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s) +} + +func autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1.ExtenderManagedResource, s conversion.Scope) error { + out.Name = in.Name + out.IgnoredByScheduler = in.IgnoredByScheduler + return nil +} + +// Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource is an autogenerated conversion function. +func Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1.ExtenderManagedResource, s conversion.Scope) error { + return autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in, out, s) +} + +func autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { + out.Insecure = in.Insecure + out.ServerName = in.ServerName + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + out.CAFile = in.CAFile + out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) + out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) + out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) + return nil +} + +// Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function. 
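These autogenerated conversions cast slices and pointers through unsafe.Pointer instead of copying element by element; conversion-gen only emits that form when the source and destination types share an identical memory layout. A stand-alone sketch of the same pattern on two hypothetical point types:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Two struct types with identical memory layouts, which is what
    // conversion-gen requires before it will emit an unsafe cast.
    type externalPoint struct {
        Utilization int32
        Score       int32
    }

    type internalPoint struct {
        Utilization int32
        Score       int32
    }

    func main() {
        in := []externalPoint{{Utilization: 50, Score: 5}}

        // Reinterpret the slice header in place rather than copying each element.
        out := *(*[]internalPoint)(unsafe.Pointer(&in))

        fmt.Println(out[0].Utilization, out[0].Score) // 50 5
    }

The cast avoids an O(n) copy for large slices, at the cost of tying the generated code to the layouts staying in sync.
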
+func Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { + return autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s) +} + +func autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1.ExtenderTLSConfig, s conversion.Scope) error { + out.Insecure = in.Insecure + out.ServerName = in.ServerName + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + out.CAFile = in.CAFile + out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) + out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) + out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) + return nil +} + +// Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig is an autogenerated conversion function. +func Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1.ExtenderTLSConfig, s conversion.Scope) error { + return autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in, out, s) +} + +func autoConvert_v1_LabelPreference_To_config_LabelPreference(in *v1.LabelPreference, out *config.LabelPreference, s conversion.Scope) error { + out.Label = in.Label + out.Presence = in.Presence + return nil +} + +// Convert_v1_LabelPreference_To_config_LabelPreference is an autogenerated conversion function. +func Convert_v1_LabelPreference_To_config_LabelPreference(in *v1.LabelPreference, out *config.LabelPreference, s conversion.Scope) error { + return autoConvert_v1_LabelPreference_To_config_LabelPreference(in, out, s) +} + +func autoConvert_config_LabelPreference_To_v1_LabelPreference(in *config.LabelPreference, out *v1.LabelPreference, s conversion.Scope) error { + out.Label = in.Label + out.Presence = in.Presence + return nil +} + +// Convert_config_LabelPreference_To_v1_LabelPreference is an autogenerated conversion function. +func Convert_config_LabelPreference_To_v1_LabelPreference(in *config.LabelPreference, out *v1.LabelPreference, s conversion.Scope) error { + return autoConvert_config_LabelPreference_To_v1_LabelPreference(in, out, s) +} + +func autoConvert_v1_LabelsPresence_To_config_LabelsPresence(in *v1.LabelsPresence, out *config.LabelsPresence, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + out.Presence = in.Presence + return nil +} + +// Convert_v1_LabelsPresence_To_config_LabelsPresence is an autogenerated conversion function. +func Convert_v1_LabelsPresence_To_config_LabelsPresence(in *v1.LabelsPresence, out *config.LabelsPresence, s conversion.Scope) error { + return autoConvert_v1_LabelsPresence_To_config_LabelsPresence(in, out, s) +} + +func autoConvert_config_LabelsPresence_To_v1_LabelsPresence(in *config.LabelsPresence, out *v1.LabelsPresence, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + out.Presence = in.Presence + return nil +} + +// Convert_config_LabelsPresence_To_v1_LabelsPresence is an autogenerated conversion function. 
+func Convert_config_LabelsPresence_To_v1_LabelsPresence(in *config.LabelsPresence, out *v1.LabelsPresence, s conversion.Scope) error { + return autoConvert_config_LabelsPresence_To_v1_LabelsPresence(in, out, s) +} + +func autoConvert_v1_Policy_To_config_Policy(in *v1.Policy, out *config.Policy, s conversion.Scope) error { + out.Predicates = *(*[]config.PredicatePolicy)(unsafe.Pointer(&in.Predicates)) + out.Priorities = *(*[]config.PriorityPolicy)(unsafe.Pointer(&in.Priorities)) + out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders)) + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.AlwaysCheckAllPredicates = in.AlwaysCheckAllPredicates + return nil +} + +// Convert_v1_Policy_To_config_Policy is an autogenerated conversion function. +func Convert_v1_Policy_To_config_Policy(in *v1.Policy, out *config.Policy, s conversion.Scope) error { + return autoConvert_v1_Policy_To_config_Policy(in, out, s) +} + +func autoConvert_config_Policy_To_v1_Policy(in *config.Policy, out *v1.Policy, s conversion.Scope) error { + out.Predicates = *(*[]v1.PredicatePolicy)(unsafe.Pointer(&in.Predicates)) + out.Priorities = *(*[]v1.PriorityPolicy)(unsafe.Pointer(&in.Priorities)) + out.Extenders = *(*[]v1.Extender)(unsafe.Pointer(&in.Extenders)) + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.AlwaysCheckAllPredicates = in.AlwaysCheckAllPredicates + return nil +} + +// Convert_config_Policy_To_v1_Policy is an autogenerated conversion function. +func Convert_config_Policy_To_v1_Policy(in *config.Policy, out *v1.Policy, s conversion.Scope) error { + return autoConvert_config_Policy_To_v1_Policy(in, out, s) +} + +func autoConvert_v1_PredicateArgument_To_config_PredicateArgument(in *v1.PredicateArgument, out *config.PredicateArgument, s conversion.Scope) error { + out.ServiceAffinity = (*config.ServiceAffinity)(unsafe.Pointer(in.ServiceAffinity)) + out.LabelsPresence = (*config.LabelsPresence)(unsafe.Pointer(in.LabelsPresence)) + return nil +} + +// Convert_v1_PredicateArgument_To_config_PredicateArgument is an autogenerated conversion function. +func Convert_v1_PredicateArgument_To_config_PredicateArgument(in *v1.PredicateArgument, out *config.PredicateArgument, s conversion.Scope) error { + return autoConvert_v1_PredicateArgument_To_config_PredicateArgument(in, out, s) +} + +func autoConvert_config_PredicateArgument_To_v1_PredicateArgument(in *config.PredicateArgument, out *v1.PredicateArgument, s conversion.Scope) error { + out.ServiceAffinity = (*v1.ServiceAffinity)(unsafe.Pointer(in.ServiceAffinity)) + out.LabelsPresence = (*v1.LabelsPresence)(unsafe.Pointer(in.LabelsPresence)) + return nil +} + +// Convert_config_PredicateArgument_To_v1_PredicateArgument is an autogenerated conversion function. +func Convert_config_PredicateArgument_To_v1_PredicateArgument(in *config.PredicateArgument, out *v1.PredicateArgument, s conversion.Scope) error { + return autoConvert_config_PredicateArgument_To_v1_PredicateArgument(in, out, s) +} + +func autoConvert_v1_PredicatePolicy_To_config_PredicatePolicy(in *v1.PredicatePolicy, out *config.PredicatePolicy, s conversion.Scope) error { + out.Name = in.Name + out.Argument = (*config.PredicateArgument)(unsafe.Pointer(in.Argument)) + return nil +} + +// Convert_v1_PredicatePolicy_To_config_PredicatePolicy is an autogenerated conversion function. 
+func Convert_v1_PredicatePolicy_To_config_PredicatePolicy(in *v1.PredicatePolicy, out *config.PredicatePolicy, s conversion.Scope) error { + return autoConvert_v1_PredicatePolicy_To_config_PredicatePolicy(in, out, s) +} + +func autoConvert_config_PredicatePolicy_To_v1_PredicatePolicy(in *config.PredicatePolicy, out *v1.PredicatePolicy, s conversion.Scope) error { + out.Name = in.Name + out.Argument = (*v1.PredicateArgument)(unsafe.Pointer(in.Argument)) + return nil +} + +// Convert_config_PredicatePolicy_To_v1_PredicatePolicy is an autogenerated conversion function. +func Convert_config_PredicatePolicy_To_v1_PredicatePolicy(in *config.PredicatePolicy, out *v1.PredicatePolicy, s conversion.Scope) error { + return autoConvert_config_PredicatePolicy_To_v1_PredicatePolicy(in, out, s) +} + +func autoConvert_v1_PriorityArgument_To_config_PriorityArgument(in *v1.PriorityArgument, out *config.PriorityArgument, s conversion.Scope) error { + out.ServiceAntiAffinity = (*config.ServiceAntiAffinity)(unsafe.Pointer(in.ServiceAntiAffinity)) + out.LabelPreference = (*config.LabelPreference)(unsafe.Pointer(in.LabelPreference)) + out.RequestedToCapacityRatioArguments = (*config.RequestedToCapacityRatioArguments)(unsafe.Pointer(in.RequestedToCapacityRatioArguments)) + return nil +} + +// Convert_v1_PriorityArgument_To_config_PriorityArgument is an autogenerated conversion function. +func Convert_v1_PriorityArgument_To_config_PriorityArgument(in *v1.PriorityArgument, out *config.PriorityArgument, s conversion.Scope) error { + return autoConvert_v1_PriorityArgument_To_config_PriorityArgument(in, out, s) +} + +func autoConvert_config_PriorityArgument_To_v1_PriorityArgument(in *config.PriorityArgument, out *v1.PriorityArgument, s conversion.Scope) error { + out.ServiceAntiAffinity = (*v1.ServiceAntiAffinity)(unsafe.Pointer(in.ServiceAntiAffinity)) + out.LabelPreference = (*v1.LabelPreference)(unsafe.Pointer(in.LabelPreference)) + out.RequestedToCapacityRatioArguments = (*v1.RequestedToCapacityRatioArguments)(unsafe.Pointer(in.RequestedToCapacityRatioArguments)) + return nil +} + +// Convert_config_PriorityArgument_To_v1_PriorityArgument is an autogenerated conversion function. +func Convert_config_PriorityArgument_To_v1_PriorityArgument(in *config.PriorityArgument, out *v1.PriorityArgument, s conversion.Scope) error { + return autoConvert_config_PriorityArgument_To_v1_PriorityArgument(in, out, s) +} + +func autoConvert_v1_PriorityPolicy_To_config_PriorityPolicy(in *v1.PriorityPolicy, out *config.PriorityPolicy, s conversion.Scope) error { + out.Name = in.Name + out.Weight = in.Weight + out.Argument = (*config.PriorityArgument)(unsafe.Pointer(in.Argument)) + return nil +} + +// Convert_v1_PriorityPolicy_To_config_PriorityPolicy is an autogenerated conversion function. +func Convert_v1_PriorityPolicy_To_config_PriorityPolicy(in *v1.PriorityPolicy, out *config.PriorityPolicy, s conversion.Scope) error { + return autoConvert_v1_PriorityPolicy_To_config_PriorityPolicy(in, out, s) +} + +func autoConvert_config_PriorityPolicy_To_v1_PriorityPolicy(in *config.PriorityPolicy, out *v1.PriorityPolicy, s conversion.Scope) error { + out.Name = in.Name + out.Weight = in.Weight + out.Argument = (*v1.PriorityArgument)(unsafe.Pointer(in.Argument)) + return nil +} + +// Convert_config_PriorityPolicy_To_v1_PriorityPolicy is an autogenerated conversion function. 
+func Convert_config_PriorityPolicy_To_v1_PriorityPolicy(in *config.PriorityPolicy, out *v1.PriorityPolicy, s conversion.Scope) error { + return autoConvert_config_PriorityPolicy_To_v1_PriorityPolicy(in, out, s) +} + +func autoConvert_v1_RequestedToCapacityRatioArguments_To_config_RequestedToCapacityRatioArguments(in *v1.RequestedToCapacityRatioArguments, out *config.RequestedToCapacityRatioArguments, s conversion.Scope) error { + out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) + out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1_RequestedToCapacityRatioArguments_To_config_RequestedToCapacityRatioArguments is an autogenerated conversion function. +func Convert_v1_RequestedToCapacityRatioArguments_To_config_RequestedToCapacityRatioArguments(in *v1.RequestedToCapacityRatioArguments, out *config.RequestedToCapacityRatioArguments, s conversion.Scope) error { + return autoConvert_v1_RequestedToCapacityRatioArguments_To_config_RequestedToCapacityRatioArguments(in, out, s) +} + +func autoConvert_config_RequestedToCapacityRatioArguments_To_v1_RequestedToCapacityRatioArguments(in *config.RequestedToCapacityRatioArguments, out *v1.RequestedToCapacityRatioArguments, s conversion.Scope) error { + out.Shape = *(*[]v1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) + out.Resources = *(*[]v1.ResourceSpec)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_config_RequestedToCapacityRatioArguments_To_v1_RequestedToCapacityRatioArguments is an autogenerated conversion function. +func Convert_config_RequestedToCapacityRatioArguments_To_v1_RequestedToCapacityRatioArguments(in *config.RequestedToCapacityRatioArguments, out *v1.RequestedToCapacityRatioArguments, s conversion.Scope) error { + return autoConvert_config_RequestedToCapacityRatioArguments_To_v1_RequestedToCapacityRatioArguments(in, out, s) +} + +func autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in *v1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { + out.Name = in.Name + out.Weight = in.Weight + return nil +} + +// Convert_v1_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function. +func Convert_v1_ResourceSpec_To_config_ResourceSpec(in *v1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in, out, s) +} + +func autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *v1.ResourceSpec, s conversion.Scope) error { + out.Name = in.Name + out.Weight = in.Weight + return nil +} + +// Convert_config_ResourceSpec_To_v1_ResourceSpec is an autogenerated conversion function. +func Convert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *v1.ResourceSpec, s conversion.Scope) error { + return autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in, out, s) +} + +func autoConvert_v1_ServiceAffinity_To_config_ServiceAffinity(in *v1.ServiceAffinity, out *config.ServiceAffinity, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_v1_ServiceAffinity_To_config_ServiceAffinity is an autogenerated conversion function. 
+func Convert_v1_ServiceAffinity_To_config_ServiceAffinity(in *v1.ServiceAffinity, out *config.ServiceAffinity, s conversion.Scope) error { + return autoConvert_v1_ServiceAffinity_To_config_ServiceAffinity(in, out, s) +} + +func autoConvert_config_ServiceAffinity_To_v1_ServiceAffinity(in *config.ServiceAffinity, out *v1.ServiceAffinity, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_config_ServiceAffinity_To_v1_ServiceAffinity is an autogenerated conversion function. +func Convert_config_ServiceAffinity_To_v1_ServiceAffinity(in *config.ServiceAffinity, out *v1.ServiceAffinity, s conversion.Scope) error { + return autoConvert_config_ServiceAffinity_To_v1_ServiceAffinity(in, out, s) +} + +func autoConvert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in *v1.ServiceAntiAffinity, out *config.ServiceAntiAffinity, s conversion.Scope) error { + out.Label = in.Label + return nil +} + +// Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity is an autogenerated conversion function. +func Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in *v1.ServiceAntiAffinity, out *config.ServiceAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in, out, s) +} + +func autoConvert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in *config.ServiceAntiAffinity, out *v1.ServiceAntiAffinity, s conversion.Scope) error { + out.Label = in.Label + return nil +} + +// Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity is an autogenerated conversion function. +func Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in *config.ServiceAntiAffinity, out *v1.ServiceAntiAffinity, s conversion.Scope) error { + return autoConvert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in, out, s) +} + +func autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { + out.Utilization = in.Utilization + out.Score = in.Score + return nil +} + +// Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function. +func Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { + return autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s) +} + +func autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1.UtilizationShapePoint, s conversion.Scope) error { + out.Utilization = in.Utilization + out.Score = in.Score + return nil +} + +// Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint is an autogenerated conversion function. 
+func Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1.UtilizationShapePoint, s conversion.Scope) error { + return autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..89e533d9930 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go new file mode 100644 index 00000000000..cce2e603a69 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/BUILD new file mode 100644 index 00000000000..a020af102be --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/BUILD @@ -0,0 +1,59 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/master/ports:go_default_library", + "//pkg/scheduler/apis/config:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library", + "//staging/src/k8s.io/kube-scheduler/config/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "conversion_test.go", + "defaults_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/apis/config:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/component-base/config:go_default_library", + "//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library", + "//staging/src/k8s.io/kube-scheduler/config/v1alpha1:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/conversion.go new file mode 100644 index 00000000000..bc43dd6b8ab --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/conversion.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + + conversion "k8s.io/apimachinery/pkg/conversion" + v1alpha1 "k8s.io/kube-scheduler/config/v1alpha1" + config "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +// Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in *v1alpha1.KubeSchedulerLeaderElectionConfiguration, out *config.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in, out, s); err != nil { + return err + } + if len(in.ResourceNamespace) > 0 && len(in.LockObjectNamespace) == 0 { + out.ResourceNamespace = in.ResourceNamespace + } else if len(in.ResourceNamespace) == 0 && len(in.LockObjectNamespace) > 0 { + out.ResourceNamespace = in.LockObjectNamespace + } else if len(in.ResourceNamespace) > 0 && len(in.LockObjectNamespace) > 0 { + if in.ResourceNamespace == in.LockObjectNamespace { + out.ResourceNamespace = in.ResourceNamespace + } else { + return fmt.Errorf("ResourceNamespace and LockObjectNamespace are both set and do not match, ResourceNamespace: %s, LockObjectNamespace: %s", in.ResourceNamespace, in.LockObjectNamespace) + } + } + + if len(in.ResourceName) > 0 && len(in.LockObjectName) == 0 { + out.ResourceName = in.ResourceName + } else if len(in.ResourceName) == 0 && len(in.LockObjectName) > 0 { + out.ResourceName = in.LockObjectName + } else if len(in.ResourceName) > 0 && len(in.LockObjectName) > 0 { + if in.ResourceName == in.LockObjectName { + out.ResourceName = in.ResourceName + } else { + return fmt.Errorf("ResourceName and LockObjectName are both set and do not match, ResourceName: %s, LockObjectName: %s", in.ResourceName, in.LockObjectName) + } + } + return nil +} + +// Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function. +func Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *config.KubeSchedulerLeaderElectionConfiguration, out *v1alpha1.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := autoConvert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in, out, s); err != nil { + return err + } + out.ResourceNamespace = in.ResourceNamespace + out.LockObjectNamespace = in.ResourceNamespace + out.ResourceName = in.ResourceName + out.LockObjectName = in.ResourceName + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults.go new file mode 100644 index 00000000000..a618d90f592 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults.go @@ -0,0 +1,171 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "net" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1" + + // this package shouldn't really depend on other k8s.io/kubernetes code + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_KubeSchedulerConfiguration sets additional defaults +func SetDefaults_KubeSchedulerConfiguration(obj *kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration) { + if obj.SchedulerName == nil { + val := api.DefaultSchedulerName + obj.SchedulerName = &val + } + + if obj.HardPodAffinitySymmetricWeight == nil { + val := api.DefaultHardPodAffinitySymmetricWeight + obj.HardPodAffinitySymmetricWeight = &val + } + + if obj.AlgorithmSource.Policy == nil && + (obj.AlgorithmSource.Provider == nil || len(*obj.AlgorithmSource.Provider) == 0) { + val := kubeschedulerconfigv1alpha1.SchedulerDefaultProviderName + obj.AlgorithmSource.Provider = &val + } + + if policy := obj.AlgorithmSource.Policy; policy != nil { + if policy.ConfigMap != nil && len(policy.ConfigMap.Namespace) == 0 { + obj.AlgorithmSource.Policy.ConfigMap.Namespace = api.NamespaceSystem + } + } + + // For Healthz and Metrics bind addresses, we want to check: + // 1. If the value is nil, default to 0.0.0.0 and default scheduler port + // 2. If there is a value set, attempt to split it. If it's just a port (ie, ":1234"), default to 0.0.0.0 with that port + // 3. If splitting the value fails, check if the value is even a valid IP. If so, use that with the default port. + // Otherwise use the default bind address + defaultBindAddress := net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.InsecureSchedulerPort)) + if obj.HealthzBindAddress == nil { + obj.HealthzBindAddress = &defaultBindAddress + } else { + if host, port, err := net.SplitHostPort(*obj.HealthzBindAddress); err == nil { + if len(host) == 0 { + host = "0.0.0.0" + } + hostPort := net.JoinHostPort(host, port) + obj.HealthzBindAddress = &hostPort + } else { + // Something went wrong splitting the host/port, could just be a missing port so check if the + // existing value is a valid IP address. 
If so, use that with the default scheduler port + if host := net.ParseIP(*obj.HealthzBindAddress); host != nil { + hostPort := net.JoinHostPort(*obj.HealthzBindAddress, strconv.Itoa(ports.InsecureSchedulerPort)) + obj.HealthzBindAddress = &hostPort + } else { + // TODO: in v1beta1 we should let this error instead of stomping with a default value + obj.HealthzBindAddress = &defaultBindAddress + } + } + } + + if obj.MetricsBindAddress == nil { + obj.MetricsBindAddress = &defaultBindAddress + } else { + if host, port, err := net.SplitHostPort(*obj.MetricsBindAddress); err == nil { + if len(host) == 0 { + host = "0.0.0.0" + } + hostPort := net.JoinHostPort(host, port) + obj.MetricsBindAddress = &hostPort + } else { + // Something went wrong splitting the host/port, could just be a missing port so check if the + // existing value is a valid IP address. If so, use that with the default scheduler port + if host := net.ParseIP(*obj.MetricsBindAddress); host != nil { + hostPort := net.JoinHostPort(*obj.MetricsBindAddress, strconv.Itoa(ports.InsecureSchedulerPort)) + obj.MetricsBindAddress = &hostPort + } else { + // TODO: in v1beta1 we should let this error instead of stomping with a default value + obj.MetricsBindAddress = &defaultBindAddress + } + } + } + + if obj.DisablePreemption == nil { + disablePreemption := false + obj.DisablePreemption = &disablePreemption + } + + if obj.PercentageOfNodesToScore == nil { + percentageOfNodesToScore := int32(config.DefaultPercentageOfNodesToScore) + obj.PercentageOfNodesToScore = &percentageOfNodesToScore + } + + if len(obj.LeaderElection.ResourceLock) == 0 { + obj.LeaderElection.ResourceLock = "endpointsleases" + } + if len(obj.LeaderElection.LockObjectNamespace) == 0 && len(obj.LeaderElection.ResourceNamespace) == 0 { + obj.LeaderElection.LockObjectNamespace = kubeschedulerconfigv1alpha1.SchedulerDefaultLockObjectNamespace + } + if len(obj.LeaderElection.LockObjectName) == 0 && len(obj.LeaderElection.ResourceName) == 0 { + obj.LeaderElection.LockObjectName = kubeschedulerconfigv1alpha1.SchedulerDefaultLockObjectName + } + + if len(obj.ClientConnection.ContentType) == 0 { + obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" + } + // Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings. 
+ if obj.ClientConnection.QPS == 0.0 { + obj.ClientConnection.QPS = 50.0 + } + if obj.ClientConnection.Burst == 0 { + obj.ClientConnection.Burst = 100 + } + + // Use the default LeaderElectionConfiguration options + componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection.LeaderElectionConfiguration) + + if obj.BindTimeoutSeconds == nil { + val := int64(600) + obj.BindTimeoutSeconds = &val + } + + if obj.PodInitialBackoffSeconds == nil { + val := int64(1) + obj.PodInitialBackoffSeconds = &val + } + + if obj.PodMaxBackoffSeconds == nil { + val := int64(10) + obj.PodMaxBackoffSeconds = &val + } + + // Enable profiling by default in the scheduler + if obj.EnableProfiling == nil { + enableProfiling := true + obj.EnableProfiling = &enableProfiling + } + + // Enable contention profiling by default if profiling is enabled + if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { + enableContentionProfiling := true + obj.EnableContentionProfiling = &enableContentionProfiling + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/doc.go new file mode 100644 index 00000000000..44518561fdd --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config +// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1alpha1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../../vendor/k8s.io/kube-scheduler/config/v1alpha1 +// +groupName=kubescheduler.config.k8s.io + +package v1alpha1 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/register.go new file mode 100644 index 00000000000..0e9db36a6b2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. + localSchemeBuilder = &kubeschedulerconfigv1alpha1.SchemeBuilder + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000000..0f1dc00bf5c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,686 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + v1alpha1 "k8s.io/kube-scheduler/config/v1alpha1" + config "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1alpha1.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1alpha1.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1alpha1.KubeSchedulerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Plugin_To_config_Plugin(a.(*v1alpha1.Plugin), b.(*config.Plugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*v1alpha1.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Plugin_To_v1alpha1_Plugin(a.(*config.Plugin), b.(*v1alpha1.Plugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PluginConfig_To_config_PluginConfig(a.(*v1alpha1.PluginConfig), b.(*config.PluginConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*v1alpha1.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PluginConfig_To_v1alpha1_PluginConfig(a.(*config.PluginConfig), b.(*v1alpha1.PluginConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PluginSet_To_config_PluginSet(a.(*v1alpha1.PluginSet), b.(*config.PluginSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*v1alpha1.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_PluginSet_To_v1alpha1_PluginSet(a.(*config.PluginSet), b.(*v1alpha1.PluginSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Plugins_To_config_Plugins(a.(*v1alpha1.Plugins), b.(*config.Plugins), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Plugins)(nil), (*v1alpha1.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Plugins_To_v1alpha1_Plugins(a.(*config.Plugins), b.(*v1alpha1.Plugins), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerAlgorithmSource)(nil), (*config.SchedulerAlgorithmSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(a.(*v1alpha1.SchedulerAlgorithmSource), b.(*config.SchedulerAlgorithmSource), scope) 
+ }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SchedulerAlgorithmSource)(nil), (*v1alpha1.SchedulerAlgorithmSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(a.(*config.SchedulerAlgorithmSource), b.(*v1alpha1.SchedulerAlgorithmSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicyConfigMapSource)(nil), (*config.SchedulerPolicyConfigMapSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(a.(*v1alpha1.SchedulerPolicyConfigMapSource), b.(*config.SchedulerPolicyConfigMapSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicyConfigMapSource)(nil), (*v1alpha1.SchedulerPolicyConfigMapSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(a.(*config.SchedulerPolicyConfigMapSource), b.(*v1alpha1.SchedulerPolicyConfigMapSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicyFileSource)(nil), (*config.SchedulerPolicyFileSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(a.(*v1alpha1.SchedulerPolicyFileSource), b.(*config.SchedulerPolicyFileSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicyFileSource)(nil), (*v1alpha1.SchedulerPolicyFileSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(a.(*config.SchedulerPolicyFileSource), b.(*v1alpha1.SchedulerPolicyFileSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicySource)(nil), (*config.SchedulerPolicySource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(a.(*v1alpha1.SchedulerPolicySource), b.(*config.SchedulerPolicySource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicySource)(nil), (*v1alpha1.SchedulerPolicySource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(a.(*config.SchedulerPolicySource), b.(*v1alpha1.SchedulerPolicySource), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*config.KubeSchedulerLeaderElectionConfiguration)(nil), (*v1alpha1.KubeSchedulerLeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(a.(*config.KubeSchedulerLeaderElectionConfiguration), b.(*v1alpha1.KubeSchedulerLeaderElectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha1.KubeSchedulerLeaderElectionConfiguration)(nil), (*config.KubeSchedulerLeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(a.(*v1alpha1.KubeSchedulerLeaderElectionConfiguration), b.(*config.KubeSchedulerLeaderElectionConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { + if err := v1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { + return err + } + if err := Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinitySymmetricWeight, &out.HardPodAffinitySymmetricWeight, s); err != nil { + return err + } + if err := Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { + return err + } + if err := componentbaseconfigv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { + return err + } + if err := componentbaseconfigv1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { + return err + } + if err := v1.Convert_Pointer_bool_To_bool(&in.DisablePreemption, &out.DisablePreemption, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { + return err + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(config.Plugins) + if err := Convert_v1alpha1_Plugins_To_config_Plugins(*in, *out, s); err != nil { + return err + } + } else { + out.Plugins = nil + } + out.PluginConfig = *(*[]config.PluginConfig)(unsafe.Pointer(&in.PluginConfig)) + return nil +} + +// Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1alpha1.KubeSchedulerConfiguration, s conversion.Scope) error { + if err := v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { + return err + } + if err := Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinitySymmetricWeight, &out.HardPodAffinitySymmetricWeight, s); err != nil { + return err + } + if err := Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { + return err + } + if err := componentbaseconfigv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { + return err + } + if err := componentbaseconfigv1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { + return err + } + if err := v1.Convert_bool_To_Pointer_bool(&in.DisablePreemption, &out.DisablePreemption, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { + return err + } + if err := v1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { + return err + } + if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { + return err + } + if err := v1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { + return err + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(v1alpha1.Plugins) + if err := Convert_config_Plugins_To_v1alpha1_Plugins(*in, *out, s); err != nil { + return err + } + } else { + out.Plugins = nil + } + out.PluginConfig = *(*[]v1alpha1.PluginConfig)(unsafe.Pointer(&in.PluginConfig)) + return nil +} + +// Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration is an autogenerated conversion function. 
+func Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1alpha1.KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in *v1alpha1.KubeSchedulerLeaderElectionConfiguration, out *config.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := componentbaseconfigv1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { + return err + } + // WARNING: in.LockObjectNamespace requires manual conversion: does not exist in peer-type + // WARNING: in.LockObjectName requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *config.KubeSchedulerLeaderElectionConfiguration, out *v1alpha1.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := componentbaseconfigv1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_Plugin_To_config_Plugin(in *v1alpha1.Plugin, out *config.Plugin, s conversion.Scope) error { + out.Name = in.Name + if err := v1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Plugin_To_config_Plugin is an autogenerated conversion function. +func Convert_v1alpha1_Plugin_To_config_Plugin(in *v1alpha1.Plugin, out *config.Plugin, s conversion.Scope) error { + return autoConvert_v1alpha1_Plugin_To_config_Plugin(in, out, s) +} + +func autoConvert_config_Plugin_To_v1alpha1_Plugin(in *config.Plugin, out *v1alpha1.Plugin, s conversion.Scope) error { + out.Name = in.Name + if err := v1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil { + return err + } + return nil +} + +// Convert_config_Plugin_To_v1alpha1_Plugin is an autogenerated conversion function. +func Convert_config_Plugin_To_v1alpha1_Plugin(in *config.Plugin, out *v1alpha1.Plugin, s conversion.Scope) error { + return autoConvert_config_Plugin_To_v1alpha1_Plugin(in, out, s) +} + +func autoConvert_v1alpha1_PluginConfig_To_config_PluginConfig(in *v1alpha1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { + out.Name = in.Name + out.Args = in.Args + return nil +} + +// Convert_v1alpha1_PluginConfig_To_config_PluginConfig is an autogenerated conversion function. +func Convert_v1alpha1_PluginConfig_To_config_PluginConfig(in *v1alpha1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_PluginConfig_To_config_PluginConfig(in, out, s) +} + +func autoConvert_config_PluginConfig_To_v1alpha1_PluginConfig(in *config.PluginConfig, out *v1alpha1.PluginConfig, s conversion.Scope) error { + out.Name = in.Name + out.Args = in.Args + return nil +} + +// Convert_config_PluginConfig_To_v1alpha1_PluginConfig is an autogenerated conversion function. 
+func Convert_config_PluginConfig_To_v1alpha1_PluginConfig(in *config.PluginConfig, out *v1alpha1.PluginConfig, s conversion.Scope) error { + return autoConvert_config_PluginConfig_To_v1alpha1_PluginConfig(in, out, s) +} + +func autoConvert_v1alpha1_PluginSet_To_config_PluginSet(in *v1alpha1.PluginSet, out *config.PluginSet, s conversion.Scope) error { + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]config.Plugin, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enabled = nil + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]config.Plugin, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Disabled = nil + } + return nil +} + +// Convert_v1alpha1_PluginSet_To_config_PluginSet is an autogenerated conversion function. +func Convert_v1alpha1_PluginSet_To_config_PluginSet(in *v1alpha1.PluginSet, out *config.PluginSet, s conversion.Scope) error { + return autoConvert_v1alpha1_PluginSet_To_config_PluginSet(in, out, s) +} + +func autoConvert_config_PluginSet_To_v1alpha1_PluginSet(in *config.PluginSet, out *v1alpha1.PluginSet, s conversion.Scope) error { + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]v1alpha1.Plugin, len(*in)) + for i := range *in { + if err := Convert_config_Plugin_To_v1alpha1_Plugin(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enabled = nil + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]v1alpha1.Plugin, len(*in)) + for i := range *in { + if err := Convert_config_Plugin_To_v1alpha1_Plugin(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Disabled = nil + } + return nil +} + +// Convert_config_PluginSet_To_v1alpha1_PluginSet is an autogenerated conversion function. 
+func Convert_config_PluginSet_To_v1alpha1_PluginSet(in *config.PluginSet, out *v1alpha1.PluginSet, s conversion.Scope) error { + return autoConvert_config_PluginSet_To_v1alpha1_PluginSet(in, out, s) +} + +func autoConvert_v1alpha1_Plugins_To_config_Plugins(in *v1alpha1.Plugins, out *config.Plugins, s conversion.Scope) error { + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.QueueSort = nil + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PreFilter = nil + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Filter = nil + } + if in.PostFilter != nil { + in, out := &in.PostFilter, &out.PostFilter + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PostFilter = nil + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Score = nil + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Reserve = nil + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Permit = nil + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PreBind = nil + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Bind = nil + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PostBind = nil + } + if in.Unreserve != nil { + in, out := &in.Unreserve, &out.Unreserve + *out = new(config.PluginSet) + if err := Convert_v1alpha1_PluginSet_To_config_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Unreserve = nil + } + return nil +} + +// Convert_v1alpha1_Plugins_To_config_Plugins is an autogenerated conversion function. 
+func Convert_v1alpha1_Plugins_To_config_Plugins(in *v1alpha1.Plugins, out *config.Plugins, s conversion.Scope) error { + return autoConvert_v1alpha1_Plugins_To_config_Plugins(in, out, s) +} + +func autoConvert_config_Plugins_To_v1alpha1_Plugins(in *config.Plugins, out *v1alpha1.Plugins, s conversion.Scope) error { + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.QueueSort = nil + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PreFilter = nil + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Filter = nil + } + if in.PostFilter != nil { + in, out := &in.PostFilter, &out.PostFilter + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PostFilter = nil + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Score = nil + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Reserve = nil + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Permit = nil + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PreBind = nil + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Bind = nil + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.PostBind = nil + } + if in.Unreserve != nil { + in, out := &in.Unreserve, &out.Unreserve + *out = new(v1alpha1.PluginSet) + if err := Convert_config_PluginSet_To_v1alpha1_PluginSet(*in, *out, s); err != nil { + return err + } + } else { + out.Unreserve = nil + } + return nil +} + +// Convert_config_Plugins_To_v1alpha1_Plugins is an autogenerated conversion function. 
+func Convert_config_Plugins_To_v1alpha1_Plugins(in *config.Plugins, out *v1alpha1.Plugins, s conversion.Scope) error { + return autoConvert_config_Plugins_To_v1alpha1_Plugins(in, out, s) +} + +func autoConvert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in *v1alpha1.SchedulerAlgorithmSource, out *config.SchedulerAlgorithmSource, s conversion.Scope) error { + out.Policy = (*config.SchedulerPolicySource)(unsafe.Pointer(in.Policy)) + out.Provider = (*string)(unsafe.Pointer(in.Provider)) + return nil +} + +// Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in *v1alpha1.SchedulerAlgorithmSource, out *config.SchedulerAlgorithmSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in, out, s) +} + +func autoConvert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *config.SchedulerAlgorithmSource, out *v1alpha1.SchedulerAlgorithmSource, s conversion.Scope) error { + out.Policy = (*v1alpha1.SchedulerPolicySource)(unsafe.Pointer(in.Policy)) + out.Provider = (*string)(unsafe.Pointer(in.Provider)) + return nil +} + +// Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource is an autogenerated conversion function. +func Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *config.SchedulerAlgorithmSource, out *v1alpha1.SchedulerAlgorithmSource, s conversion.Scope) error { + return autoConvert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in, out, s) +} + +func autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in *v1alpha1.SchedulerPolicyConfigMapSource, out *config.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + return nil +} + +// Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in *v1alpha1.SchedulerPolicyConfigMapSource, out *config.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in, out, s) +} + +func autoConvert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *config.SchedulerPolicyConfigMapSource, out *v1alpha1.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + return nil +} + +// Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource is an autogenerated conversion function. 
+func Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *config.SchedulerPolicyConfigMapSource, out *v1alpha1.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + return autoConvert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in, out, s) +} + +func autoConvert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in *v1alpha1.SchedulerPolicyFileSource, out *config.SchedulerPolicyFileSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in *v1alpha1.SchedulerPolicyFileSource, out *config.SchedulerPolicyFileSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in, out, s) +} + +func autoConvert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *config.SchedulerPolicyFileSource, out *v1alpha1.SchedulerPolicyFileSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource is an autogenerated conversion function. +func Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *config.SchedulerPolicyFileSource, out *v1alpha1.SchedulerPolicyFileSource, s conversion.Scope) error { + return autoConvert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in, out, s) +} + +func autoConvert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in *v1alpha1.SchedulerPolicySource, out *config.SchedulerPolicySource, s conversion.Scope) error { + out.File = (*config.SchedulerPolicyFileSource)(unsafe.Pointer(in.File)) + out.ConfigMap = (*config.SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in *v1alpha1.SchedulerPolicySource, out *config.SchedulerPolicySource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in, out, s) +} + +func autoConvert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *config.SchedulerPolicySource, out *v1alpha1.SchedulerPolicySource, s conversion.Scope) error { + out.File = (*v1alpha1.SchedulerPolicyFileSource)(unsafe.Pointer(in.File)) + out.ConfigMap = (*v1alpha1.SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource is an autogenerated conversion function. 
+func Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *config.SchedulerPolicySource, out *v1alpha1.SchedulerPolicySource, s conversion.Scope) error { + return autoConvert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..0ec19467c40 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.defaults.go new file mode 100644 index 00000000000..60e98cb8ad0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,40 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1alpha1 "k8s.io/kube-scheduler/config/v1alpha1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1alpha1.KubeSchedulerConfiguration{}, func(obj interface{}) { + SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1alpha1.KubeSchedulerConfiguration)) + }) + return nil +} + +func SetObjectDefaults_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration) { + SetDefaults_KubeSchedulerConfiguration(in) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/BUILD similarity index 53% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/BUILD index 9d21dc6aae6..b2c8d728093 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/BUILD @@ -1,22 +1,19 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["validation.go"], - importpath = "k8s.io/kubernetes/pkg/scheduler/api/validation", + importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/validation", + visibility = ["//visibility:public"], deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/apis/config:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/component-base/config/validation:go_default_library", ], ) @@ -24,7 +21,11 @@ go_test( name = "go_default_test", srcs = ["validation_test.go"], embed = [":go_default_library"], - deps = ["//pkg/scheduler/api:go_default_library"], + deps = [ + "//pkg/scheduler/apis/config:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/component-base/config:go_default_library", + ], ) filegroup( @@ -38,4 +39,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go new file mode 100644 index 00000000000..abd58c88646 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go @@ -0,0 +1,169 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "errors" + "fmt" + + v1 "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + componentbasevalidation "k8s.io/component-base/config/validation" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/scheduler/apis/config" +) + +// ValidateKubeSchedulerConfiguration ensures validation of the KubeSchedulerConfiguration struct +func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, componentbasevalidation.ValidateClientConnectionConfiguration(&cc.ClientConnection, field.NewPath("clientConnection"))...) + allErrs = append(allErrs, ValidateKubeSchedulerLeaderElectionConfiguration(&cc.LeaderElection, field.NewPath("leaderElection"))...) + if len(cc.SchedulerName) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("schedulerName"), "")) + } + for _, msg := range validation.IsValidSocketAddr(cc.HealthzBindAddress) { + allErrs = append(allErrs, field.Invalid(field.NewPath("healthzBindAddress"), cc.HealthzBindAddress, msg)) + } + for _, msg := range validation.IsValidSocketAddr(cc.MetricsBindAddress) { + allErrs = append(allErrs, field.Invalid(field.NewPath("metricsBindAddress"), cc.MetricsBindAddress, msg)) + } + if cc.HardPodAffinitySymmetricWeight < 0 || cc.HardPodAffinitySymmetricWeight > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath("hardPodAffinitySymmetricWeight"), cc.HardPodAffinitySymmetricWeight, "not in valid range [0-100]")) + } + if cc.PercentageOfNodesToScore < 0 || cc.PercentageOfNodesToScore > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath("percentageOfNodesToScore"), + cc.PercentageOfNodesToScore, "not in valid range [0-100]")) + } + if cc.PodInitialBackoffSeconds <= 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("podInitialBackoffSeconds"), + cc.PodInitialBackoffSeconds, "must be greater than 0")) + } + if cc.PodMaxBackoffSeconds < cc.PodInitialBackoffSeconds { + allErrs = append(allErrs, field.Invalid(field.NewPath("podMaxBackoffSeconds"), + cc.PodMaxBackoffSeconds, "must be greater than or equal to PodInitialBackoffSeconds")) + } + return allErrs +} + +// ValidateKubeSchedulerLeaderElectionConfiguration ensures validation of the KubeSchedulerLeaderElectionConfiguration struct +func ValidateKubeSchedulerLeaderElectionConfiguration(cc *config.KubeSchedulerLeaderElectionConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !cc.LeaderElectionConfiguration.LeaderElect { + return allErrs + } + allErrs = append(allErrs, componentbasevalidation.ValidateLeaderElectionConfiguration(&cc.LeaderElectionConfiguration, field.NewPath("leaderElectionConfiguration"))...) 
+ return allErrs +} + +// ValidatePolicy checks for errors in the Config +// It does not return early so that it can find as many errors as possible +func ValidatePolicy(policy config.Policy) error { + var validationErrors []error + + priorities := make(map[string]config.PriorityPolicy, len(policy.Priorities)) + for _, priority := range policy.Priorities { + if priority.Weight <= 0 || priority.Weight >= config.MaxWeight { + validationErrors = append(validationErrors, fmt.Errorf("Priority %s should have a positive weight applied to it or it has overflown", priority.Name)) + } + validationErrors = append(validationErrors, validateCustomPriorities(priorities, priority)) + } + + binders := 0 + extenderManagedResources := sets.NewString() + for _, extender := range policy.Extenders { + if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 { + validationErrors = append(validationErrors, fmt.Errorf("Priority for extender %s should have a positive weight applied to it", extender.URLPrefix)) + } + if extender.BindVerb != "" { + binders++ + } + for _, resource := range extender.ManagedResources { + errs := validateExtendedResourceName(v1.ResourceName(resource.Name)) + if len(errs) != 0 { + validationErrors = append(validationErrors, errs...) + } + if extenderManagedResources.Has(resource.Name) { + validationErrors = append(validationErrors, fmt.Errorf("Duplicate extender managed resource name %s", string(resource.Name))) + } + extenderManagedResources.Insert(resource.Name) + } + } + if binders > 1 { + validationErrors = append(validationErrors, fmt.Errorf("Only one extender can implement bind, found %v", binders)) + } + return utilerrors.NewAggregate(validationErrors) +} + +// validateCustomPriorities validates that: +// 1. RequestedToCapacityRatioRedeclared custom priority cannot be declared multiple times, +// 2. LabelPreference/ServiceAntiAffinity custom priorities can be declared multiple times, +// however the weights for each custom priority type should be the same. +func validateCustomPriorities(priorities map[string]config.PriorityPolicy, priority config.PriorityPolicy) error { + verifyRedeclaration := func(priorityType string) error { + if existing, alreadyDeclared := priorities[priorityType]; alreadyDeclared { + return fmt.Errorf("Priority %q redeclares custom priority %q, from:%q", priority.Name, priorityType, existing.Name) + } + priorities[priorityType] = priority + return nil + } + verifyDifferentWeights := func(priorityType string) error { + if existing, alreadyDeclared := priorities[priorityType]; alreadyDeclared { + if existing.Weight != priority.Weight { + return fmt.Errorf("%s priority %q has a different weight with %q", priorityType, priority.Name, existing.Name) + } + } + priorities[priorityType] = priority + return nil + } + if priority.Argument != nil { + if priority.Argument.LabelPreference != nil { + if err := verifyDifferentWeights("LabelPreference"); err != nil { + return err + } + } else if priority.Argument.ServiceAntiAffinity != nil { + if err := verifyDifferentWeights("ServiceAntiAffinity"); err != nil { + return err + } + } else if priority.Argument.RequestedToCapacityRatioArguments != nil { + if err := verifyRedeclaration("RequestedToCapacityRatio"); err != nil { + return err + } + } else { + return fmt.Errorf("No priority arguments set for priority %s", priority.Name) + } + } + return nil +} + +// validateExtendedResourceName checks whether the specified name is a valid +// extended resource name. 
+func validateExtendedResourceName(name v1.ResourceName) []error { + var validationErrors []error + for _, msg := range validation.IsQualifiedName(string(name)) { + validationErrors = append(validationErrors, errors.New(msg)) + } + if len(validationErrors) != 0 { + return validationErrors + } + if !v1helper.IsExtendedResourceName(name) { + validationErrors = append(validationErrors, fmt.Errorf("%s is an invalid extended resource name", name)) + } + return validationErrors +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go index 746a51b2fbf..20b0f03e6e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go @@ -24,6 +24,79 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extender) DeepCopyInto(out *Extender) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(ExtenderTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]ExtenderManagedResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. +func (in *Extender) DeepCopy() *Extender { + if in == nil { + return nil + } + out := new(Extender) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. +func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { + if in == nil { + return nil + } + out := new(ExtenderManagedResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { + *out = *in + if in.CertData != nil { + in, out := &in.CertData, &out.CertData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CAData != nil { + in, out := &in.CAData, &out.CAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. +func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { + if in == nil { + return nil + } + out := new(ExtenderTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { *out = *in @@ -32,11 +105,6 @@ func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfigurati out.LeaderElection = in.LeaderElection out.ClientConnection = in.ClientConnection out.DebuggingConfiguration = in.DebuggingConfiguration - if in.BindTimeoutSeconds != nil { - in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds - *out = new(int64) - **out = **in - } if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(Plugins) @@ -87,6 +155,43 @@ func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLea return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelPreference) DeepCopyInto(out *LabelPreference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference. +func (in *LabelPreference) DeepCopy() *LabelPreference { + if in == nil { + return nil + } + out := new(LabelPreference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence. +func (in *LabelsPresence) DeepCopy() *LabelsPresence { + if in == nil { + return nil + } + out := new(LabelsPresence) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Plugin) DeepCopyInto(out *Plugin) { *out = *in @@ -217,6 +322,193 @@ func (in *Plugins) DeepCopy() *Plugins { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Predicates != nil { + in, out := &in.Predicates, &out.Predicates + *out = make([]PredicatePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priorities != nil { + in, out := &in.Priorities, &out.Priorities + *out = make([]PriorityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]Extender, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) { + *out = *in + if in.ServiceAffinity != nil { + in, out := &in.ServiceAffinity, &out.ServiceAffinity + *out = new(ServiceAffinity) + (*in).DeepCopyInto(*out) + } + if in.LabelsPresence != nil { + in, out := &in.LabelsPresence, &out.LabelsPresence + *out = new(LabelsPresence) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument. +func (in *PredicateArgument) DeepCopy() *PredicateArgument { + if in == nil { + return nil + } + out := new(PredicateArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PredicateArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy. +func (in *PredicatePolicy) DeepCopy() *PredicatePolicy { + if in == nil { + return nil + } + out := new(PredicatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) { + *out = *in + if in.ServiceAntiAffinity != nil { + in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity + *out = new(ServiceAntiAffinity) + **out = **in + } + if in.LabelPreference != nil { + in, out := &in.LabelPreference, &out.LabelPreference + *out = new(LabelPreference) + **out = **in + } + if in.RequestedToCapacityRatioArguments != nil { + in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments + *out = new(RequestedToCapacityRatioArguments) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument. +func (in *PriorityArgument) DeepCopy() *PriorityArgument { + if in == nil { + return nil + } + out := new(PriorityArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PriorityArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy. +func (in *PriorityPolicy) DeepCopy() *PriorityPolicy { + if in == nil { + return nil + } + out := new(PriorityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapacityRatioArguments) { + *out = *in + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = make([]UtilizationShapePoint, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArguments. 
+func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRatioArguments { + if in == nil { + return nil + } + out := new(RequestedToCapacityRatioArguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchedulerAlgorithmSource) DeepCopyInto(out *SchedulerAlgorithmSource) { *out = *in @@ -300,3 +592,56 @@ func (in *SchedulerPolicySource) DeepCopy() *SchedulerPolicySource { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity. +func (in *ServiceAffinity) DeepCopy() *ServiceAffinity { + if in == nil { + return nil + } + out := new(ServiceAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity. +func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity { + if in == nil { + return nil + } + out := new(ServiceAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. 
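The generated deepcopy helpers above exist so callers can hand out copies of Policy data without aliasing the original's pointers and slices. A small sketch of the expected behaviour; the UtilizationShapePoint field names (Utilization, Score) are taken from the types in this tree and should be treated as assumptions:

package main

import (
	"fmt"

	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	orig := config.PriorityPolicy{
		Name:   "RequestedToCapacityRatio",
		Weight: 1,
		Argument: &config.PriorityArgument{
			RequestedToCapacityRatioArguments: &config.RequestedToCapacityRatioArguments{
				// Assumed field names; the shape values are illustrative only.
				Shape: []config.UtilizationShapePoint{{Utilization: 0, Score: 0}, {Utilization: 100, Score: 10}},
			},
		},
	}

	cp := orig.DeepCopy()
	// The copy owns its own Argument pointer and Shape slice, so mutating it
	// leaves the original untouched.
	cp.Argument.RequestedToCapacityRatioArguments.Shape[0].Score = 5

	fmt.Println(orig.Argument.RequestedToCapacityRatioArguments.Shape[0].Score) // still 0
}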
+func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { + if in == nil { + return nil + } + out := new(UtilizationShapePoint) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/BUILD new file mode 100644 index 00000000000..a4d7ca21ba5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/doc.go similarity index 84% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/doc.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/doc.go index 3386c4d8d21..856d4c628d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // Package v1 contains scheduler API objects. -package v1 // import "k8s.io/kubernetes/pkg/scheduler/api/v1" +package v1 // import "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/types.go new file mode 100644 index 00000000000..fdfce41c740 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // MinExtenderPriority defines the min priority value for extender. + MinExtenderPriority int64 = 0 + + // MaxExtenderPriority defines the max priority value for extender. 
+ MaxExtenderPriority int64 = 10 +) + +// ExtenderPreemptionResult represents the result returned by preemption phase of extender. +type ExtenderPreemptionResult struct { + NodeNameToMetaVictims map[string]*MetaVictims +} + +// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes. +type ExtenderPreemptionArgs struct { + // Pod being scheduled + Pod *v1.Pod + // Victims map generated by scheduler preemption phase + // Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims. + NodeNameToVictims map[string]*Victims + NodeNameToMetaVictims map[string]*MetaVictims +} + +// Victims represents: +// pods: a group of pods expected to be preempted. +// numPDBViolations: the count of violations of PodDisruptionBudget +type Victims struct { + Pods []*v1.Pod + NumPDBViolations int64 +} + +// MetaPod represent identifier for a v1.Pod +type MetaPod struct { + UID string +} + +// MetaVictims represents: +// pods: a group of pods expected to be preempted. +// Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way. +// numPDBViolations: the count of violations of PodDisruptionBudget +type MetaVictims struct { + Pods []*MetaPod + NumPDBViolations int64 +} + +// ExtenderArgs represents the arguments needed by the extender to filter/prioritize +// nodes for a pod. +type ExtenderArgs struct { + // Pod being scheduled + Pod *v1.Pod + // List of candidate nodes where the pod can be scheduled; to be populated + // only if ExtenderConfig.NodeCacheCapable == false + Nodes *v1.NodeList + // List of candidate node names where the pod can be scheduled; to be + // populated only if ExtenderConfig.NodeCacheCapable == true + NodeNames *[]string +} + +// FailedNodesMap represents the filtered out nodes, with node names and failure messages +type FailedNodesMap map[string]string + +// ExtenderFilterResult represents the results of a filter call to an extender +type ExtenderFilterResult struct { + // Filtered set of nodes where the pod can be scheduled; to be populated + // only if ExtenderConfig.NodeCacheCapable == false + Nodes *v1.NodeList + // Filtered set of nodes where the pod can be scheduled; to be populated + // only if ExtenderConfig.NodeCacheCapable == true + NodeNames *[]string + // Filtered out nodes where the pod can't be scheduled and the failure messages + FailedNodes FailedNodesMap + // Error message indicating failure + Error string +} + +// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node. +type ExtenderBindingArgs struct { + // PodName is the name of the pod being bound + PodName string + // PodNamespace is the namespace of the pod being bound + PodNamespace string + // PodUID is the UID of the pod being bound + PodUID types.UID + // Node selected by the scheduler + Node string +} + +// ExtenderBindingResult represents the result of binding of a pod to a node from an extender. +type ExtenderBindingResult struct { + // Error message indicating failure + Error string +} + +// HostPriority represents the priority of scheduling to a particular host, higher priority is better. +type HostPriority struct { + // Name of the host + Host string + // Score associated with the host + Score int64 +} + +// HostPriorityList declares a []HostPriority type. 
+type HostPriorityList []HostPriority diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..e568fe00a78 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/extender/v1/zz_generated.deepcopy.go @@ -0,0 +1,339 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(corev1.Pod) + (*in).DeepCopyInto(*out) + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(corev1.NodeList) + (*in).DeepCopyInto(*out) + } + if in.NodeNames != nil { + in, out := &in.NodeNames, &out.NodeNames + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs. +func (in *ExtenderArgs) DeepCopy() *ExtenderArgs { + if in == nil { + return nil + } + out := new(ExtenderArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs. +func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs { + if in == nil { + return nil + } + out := new(ExtenderBindingArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult. +func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult { + if in == nil { + return nil + } + out := new(ExtenderBindingResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
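The extender types above are the JSON payloads a scheduler extender exchanges with the scheduler over HTTP. A minimal sketch of a filter endpoint that accepts every candidate node; the path and port are illustrative, and the real route is whatever the configured FilterVerb resolves to:

package main

import (
	"encoding/json"
	"log"
	"net/http"

	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
)

// filter approves every candidate node; a real extender would apply its own
// policy and move rejected nodes into FailedNodes with a reason.
func filter(w http.ResponseWriter, r *http.Request) {
	var args extenderv1.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	result := extenderv1.ExtenderFilterResult{
		Nodes:       args.Nodes,     // populated when NodeCacheCapable == false
		NodeNames:   args.NodeNames, // populated when NodeCacheCapable == true
		FailedNodes: extenderv1.FailedNodesMap{},
	}
	if err := json.NewEncoder(w).Encode(&result); err != nil {
		log.Printf("encoding filter result: %v", err)
	}
}

func main() {
	http.HandleFunc("/filter", filter) // illustrative path
	log.Fatal(http.ListenAndServe(":8888", nil))
}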
+func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(corev1.NodeList) + (*in).DeepCopyInto(*out) + } + if in.NodeNames != nil { + in, out := &in.NodeNames, &out.NodeNames + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.FailedNodes != nil { + in, out := &in.FailedNodes, &out.FailedNodes + *out = make(FailedNodesMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult. +func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult { + if in == nil { + return nil + } + out := new(ExtenderFilterResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(corev1.Pod) + (*in).DeepCopyInto(*out) + } + if in.NodeNameToVictims != nil { + in, out := &in.NodeNameToVictims, &out.NodeNameToVictims + *out = make(map[string]*Victims, len(*in)) + for key, val := range *in { + var outVal *Victims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(Victims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.NodeNameToMetaVictims != nil { + in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims + *out = make(map[string]*MetaVictims, len(*in)) + for key, val := range *in { + var outVal *MetaVictims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(MetaVictims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionArgs. +func (in *ExtenderPreemptionArgs) DeepCopy() *ExtenderPreemptionArgs { + if in == nil { + return nil + } + out := new(ExtenderPreemptionArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult) { + *out = *in + if in.NodeNameToMetaVictims != nil { + in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims + *out = make(map[string]*MetaVictims, len(*in)) + for key, val := range *in { + var outVal *MetaVictims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(MetaVictims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionResult. +func (in *ExtenderPreemptionResult) DeepCopy() *ExtenderPreemptionResult { + if in == nil { + return nil + } + out := new(ExtenderPreemptionResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) { + { + in := &in + *out = make(FailedNodesMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap. 
+func (in FailedNodesMap) DeepCopy() FailedNodesMap { + if in == nil { + return nil + } + out := new(FailedNodesMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPriority) DeepCopyInto(out *HostPriority) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority. +func (in *HostPriority) DeepCopy() *HostPriority { + if in == nil { + return nil + } + out := new(HostPriority) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) { + { + in := &in + *out = make(HostPriorityList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList. +func (in HostPriorityList) DeepCopy() HostPriorityList { + if in == nil { + return nil + } + out := new(HostPriorityList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetaPod) DeepCopyInto(out *MetaPod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaPod. +func (in *MetaPod) DeepCopy() *MetaPod { + if in == nil { + return nil + } + out := new(MetaPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetaVictims) DeepCopyInto(out *MetaVictims) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]*MetaPod, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetaPod) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaVictims. +func (in *MetaVictims) DeepCopy() *MetaVictims { + if in == nil { + return nil + } + out := new(MetaVictims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Victims) DeepCopyInto(out *Victims) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]*corev1.Pod, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1.Pod) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Victims. 
+func (in *Victims) DeepCopy() *Victims { + if in == nil { + return nil + } + out := new(Victims) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD index b4274a22ffe..0e573e4ef04 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD @@ -9,15 +9,19 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/core", visibility = ["//visibility:public"], deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -28,6 +32,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -43,19 +48,21 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/listers:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", - "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -64,6 +71,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go index b5a478a6caa..81fe0f66ea0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go @@ -29,7 +29,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" + extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -45,14 +46,14 @@ type HTTPExtender struct { filterVerb string prioritizeVerb string bindVerb string - weight int + weight int64 client *http.Client nodeCacheCapable bool managedResources sets.String ignorable bool } -func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, error) { +func makeTransport(config *schedulerapi.Extender) (http.RoundTripper, error) { var cfg restclient.Config if config.TLSConfig != nil { cfg.TLSClientConfig.Insecure = config.TLSConfig.Insecure @@ -83,7 +84,7 @@ func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, erro } // NewHTTPExtender creates an HTTPExtender object. -func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerExtender, error) { +func NewHTTPExtender(config *schedulerapi.Extender) (algorithm.SchedulerExtender, error) { if config.HTTPTimeout.Nanoseconds() == 0 { config.HTTPTimeout = time.Duration(DefaultExtenderTimeout) } @@ -114,6 +115,36 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerEx }, nil } +// Equal is used to check if two extenders are equal +// ignoring the client field, exported for testing +func Equal(e1, e2 *HTTPExtender) bool { + if e1.extenderURL != e2.extenderURL { + return false + } + if e1.preemptVerb != e2.preemptVerb { + return false + } + if e1.prioritizeVerb != e2.prioritizeVerb { + return false + } + if e1.bindVerb != e2.bindVerb { + return false + } + if e1.weight != e2.weight { + return false + } + if e1.nodeCacheCapable != e2.nodeCacheCapable { + return false + } + if !e1.managedResources.Equal(e2.managedResources) { + return false + } + if e1.ignorable != e2.ignorable { + return false + } + return true +} + // Name returns extenderURL to identify the extender. func (h *HTTPExtender) Name() string { return h.extenderURL @@ -134,12 +165,12 @@ func (h *HTTPExtender) SupportsPreemption() bool { // ProcessPreemption returns filtered candidate nodes and victims after running preemption logic in extender. func (h *HTTPExtender) ProcessPreemption( pod *v1.Pod, - nodeToVictims map[*v1.Node]*schedulerapi.Victims, + nodeToVictims map[*v1.Node]*extenderv1.Victims, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, -) (map[*v1.Node]*schedulerapi.Victims, error) { +) (map[*v1.Node]*extenderv1.Victims, error) { var ( - result schedulerapi.ExtenderPreemptionResult - args *schedulerapi.ExtenderPreemptionArgs + result extenderv1.ExtenderPreemptionResult + args *extenderv1.ExtenderPreemptionArgs ) if !h.SupportsPreemption() { @@ -149,13 +180,13 @@ func (h *HTTPExtender) ProcessPreemption( if h.nodeCacheCapable { // If extender has cached node info, pass NodeNameToMetaVictims in args. 
nodeNameToMetaVictims := convertToNodeNameToMetaVictims(nodeToVictims) - args = &schedulerapi.ExtenderPreemptionArgs{ + args = &extenderv1.ExtenderPreemptionArgs{ Pod: pod, NodeNameToMetaVictims: nodeNameToMetaVictims, } } else { nodeNameToVictims := convertToNodeNameToVictims(nodeToVictims) - args = &schedulerapi.ExtenderPreemptionArgs{ + args = &extenderv1.ExtenderPreemptionArgs{ Pod: pod, NodeNameToVictims: nodeNameToVictims, } @@ -178,12 +209,12 @@ func (h *HTTPExtender) ProcessPreemption( // convertToNodeToVictims converts "nodeNameToMetaVictims" from object identifiers, // such as UIDs and names, to object pointers. func (h *HTTPExtender) convertToNodeToVictims( - nodeNameToMetaVictims map[string]*schedulerapi.MetaVictims, + nodeNameToMetaVictims map[string]*extenderv1.MetaVictims, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, -) (map[*v1.Node]*schedulerapi.Victims, error) { - nodeToVictims := map[*v1.Node]*schedulerapi.Victims{} +) (map[*v1.Node]*extenderv1.Victims, error) { + nodeToVictims := map[*v1.Node]*extenderv1.Victims{} for nodeName, metaVictims := range nodeNameToMetaVictims { - victims := &schedulerapi.Victims{ + victims := &extenderv1.Victims{ Pods: []*v1.Pod{}, } for _, metaPod := range metaVictims.Pods { @@ -203,7 +234,7 @@ func (h *HTTPExtender) convertToNodeToVictims( // It should return error if there's cache inconsistency between default scheduler and extender // so that this pod or node is missing from nodeNameToInfo. func (h *HTTPExtender) convertPodUIDToPod( - metaPod *schedulerapi.MetaPod, + metaPod *extenderv1.MetaPod, nodeName string, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) (*v1.Pod, error) { var nodeInfo *schedulernodeinfo.NodeInfo @@ -223,15 +254,15 @@ func (h *HTTPExtender) convertPodUIDToPod( // convertToNodeNameToMetaVictims converts from struct type to meta types. func convertToNodeNameToMetaVictims( - nodeToVictims map[*v1.Node]*schedulerapi.Victims, -) map[string]*schedulerapi.MetaVictims { - nodeNameToVictims := map[string]*schedulerapi.MetaVictims{} + nodeToVictims map[*v1.Node]*extenderv1.Victims, +) map[string]*extenderv1.MetaVictims { + nodeNameToVictims := map[string]*extenderv1.MetaVictims{} for node, victims := range nodeToVictims { - metaVictims := &schedulerapi.MetaVictims{ - Pods: []*schedulerapi.MetaPod{}, + metaVictims := &extenderv1.MetaVictims{ + Pods: []*extenderv1.MetaPod{}, } for _, pod := range victims.Pods { - metaPod := &schedulerapi.MetaPod{ + metaPod := &extenderv1.MetaPod{ UID: string(pod.UID), } metaVictims.Pods = append(metaVictims.Pods, metaPod) @@ -243,9 +274,9 @@ func convertToNodeNameToMetaVictims( // convertToNodeNameToVictims converts from node type to node name as key. 
func convertToNodeNameToVictims( - nodeToVictims map[*v1.Node]*schedulerapi.Victims, -) map[string]*schedulerapi.Victims { - nodeNameToVictims := map[string]*schedulerapi.Victims{} + nodeToVictims map[*v1.Node]*extenderv1.Victims, +) map[string]*extenderv1.Victims { + nodeNameToVictims := map[string]*extenderv1.Victims{} for node, victims := range nodeToVictims { nodeNameToVictims[node.GetName()] = victims } @@ -258,17 +289,17 @@ func convertToNodeNameToVictims( func (h *HTTPExtender) Filter( pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, -) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { +) ([]*v1.Node, extenderv1.FailedNodesMap, error) { var ( - result schedulerapi.ExtenderFilterResult + result extenderv1.ExtenderFilterResult nodeList *v1.NodeList nodeNames *[]string nodeResult []*v1.Node - args *schedulerapi.ExtenderArgs + args *extenderv1.ExtenderArgs ) if h.filterVerb == "" { - return nodes, schedulerapi.FailedNodesMap{}, nil + return nodes, extenderv1.FailedNodesMap{}, nil } if h.nodeCacheCapable { @@ -284,7 +315,7 @@ func (h *HTTPExtender) Filter( } } - args = &schedulerapi.ExtenderArgs{ + args = &extenderv1.ExtenderArgs{ Pod: pod, Nodes: nodeList, NodeNames: nodeNames, @@ -321,18 +352,18 @@ func (h *HTTPExtender) Filter( // Prioritize based on extender implemented priority functions. Weight*priority is added // up for each such priority function. The returned score is added to the score computed // by Kubernetes scheduler. The total score is used to do the host selection. -func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) { +func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) { var ( - result schedulerapi.HostPriorityList + result extenderv1.HostPriorityList nodeList *v1.NodeList nodeNames *[]string - args *schedulerapi.ExtenderArgs + args *extenderv1.ExtenderArgs ) if h.prioritizeVerb == "" { - result := schedulerapi.HostPriorityList{} + result := extenderv1.HostPriorityList{} for _, node := range nodes { - result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0}) + result = append(result, extenderv1.HostPriority{Host: node.Name, Score: 0}) } return &result, 0, nil } @@ -350,7 +381,7 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi. } } - args = &schedulerapi.ExtenderArgs{ + args = &extenderv1.ExtenderArgs{ Pod: pod, Nodes: nodeList, NodeNames: nodeNames, @@ -364,12 +395,12 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi. // Bind delegates the action of binding a pod to a node to the extender. func (h *HTTPExtender) Bind(binding *v1.Binding) error { - var result schedulerapi.ExtenderBindingResult + var result extenderv1.ExtenderBindingResult if !h.IsBinder() { // This shouldn't happen as this extender wouldn't have become a Binder. 
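Prioritize now returns an extenderv1.HostPriorityList plus the extender's weight as int64; per the comment above, weight*score is added to the score computed by the scheduler. On the extender side, the matching endpoint decodes ExtenderArgs and replies with a HostPriorityList. A sketch, with the path and scores purely illustrative:

package main

import (
	"encoding/json"
	"log"
	"net/http"

	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
)

// prioritize gives every candidate node the same score; a real extender would
// rank nodes according to its own criteria.
func prioritize(w http.ResponseWriter, r *http.Request) {
	var args extenderv1.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var list extenderv1.HostPriorityList
	if args.Nodes != nil { // NodeCacheCapable == false: full node objects are sent
		for _, node := range args.Nodes.Items {
			list = append(list, extenderv1.HostPriority{Host: node.Name, Score: 5})
		}
	}
	if err := json.NewEncoder(w).Encode(&list); err != nil {
		log.Printf("encoding prioritize result: %v", err)
	}
}

func main() {
	http.HandleFunc("/prioritize", prioritize) // illustrative path
	log.Fatal(http.ListenAndServe(":8888", nil))
}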
return fmt.Errorf("Unexpected empty bindVerb in extender") } - req := &schedulerapi.ExtenderBindingArgs{ + req := &extenderv1.ExtenderBindingArgs{ PodName: binding.Name, PodNamespace: binding.Namespace, PodUID: binding.UID, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go index 02641c1dd26..9001b7255d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go @@ -35,16 +35,20 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" corelisters "k8s.io/client-go/listers/core/v1" + policylisters "k8s.io/client-go/listers/policy/v1beta1" "k8s.io/client-go/util/workqueue" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" utiltrace "k8s.io/utils/trace" @@ -63,27 +67,6 @@ const ( minFeasibleNodesPercentageToFind = 5 ) -var unresolvablePredicateFailureErrors = map[predicates.PredicateFailureReason]struct{}{ - predicates.ErrNodeSelectorNotMatch: {}, - predicates.ErrPodAffinityRulesNotMatch: {}, - predicates.ErrPodNotMatchHostName: {}, - predicates.ErrTaintsTolerationsNotMatch: {}, - predicates.ErrNodeLabelPresenceViolated: {}, - // Node conditions won't change when scheduler simulates removal of preemption victims. - // So, it is pointless to try nodes that have not been able to host the pod due to node - // conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, .... - predicates.ErrNodeNotReady: {}, - predicates.ErrNodeNetworkUnavailable: {}, - predicates.ErrNodeUnderDiskPressure: {}, - predicates.ErrNodeUnderPIDPressure: {}, - predicates.ErrNodeUnderMemoryPressure: {}, - predicates.ErrNodeUnschedulable: {}, - predicates.ErrNodeUnknownCondition: {}, - predicates.ErrVolumeZoneConflict: {}, - predicates.ErrVolumeNodeConflict: {}, - predicates.ErrVolumeBindConflict: {}, -} - // FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type. type FailedPredicateMap map[string][]predicates.PredicateFailureReason @@ -132,18 +115,25 @@ func (f *FitError) Error() string { // onto machines. // TODO: Rename this type. type ScheduleAlgorithm interface { - Schedule(*v1.Pod, *framework.PluginContext) (scheduleResult ScheduleResult, err error) + Schedule(context.Context, *framework.CycleState, *v1.Pod) (scheduleResult ScheduleResult, err error) // Preempt receives scheduling errors for a pod and tries to create room for // the pod by preempting lower priority pods if possible. 
// It returns the node where preemption happened, a list of preempted pods, a // list of pods whose nominated node name should be removed, and error if any. - Preempt(*v1.Pod, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) + Preempt(context.Context, *framework.CycleState, *v1.Pod, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) // Predicates() returns a pointer to a map of predicate functions. This is // exposed for testing. Predicates() map[string]predicates.FitPredicate // Prioritizers returns a slice of priority config. This is exposed for // testing. Prioritizers() []priorities.PriorityConfig + // Extenders returns a slice of extender config. This is exposed for + // testing. + Extenders() []algorithm.SchedulerExtender + // GetPredicateMetadataProducer returns the predicate metadata producer. This is needed + // for cluster autoscaler integration. + // TODO(ahg-g): remove this once CA migrates to creating a Framework instead of a full scheduler. + PredicateMetadataProducer() predicates.MetadataProducer } // ScheduleResult represents the result of one pod scheduled. It will contain @@ -161,19 +151,20 @@ type genericScheduler struct { cache internalcache.Cache schedulingQueue internalqueue.SchedulingQueue predicates map[string]predicates.FitPredicate - priorityMetaProducer priorities.PriorityMetadataProducer - predicateMetaProducer predicates.PredicateMetadataProducer + priorityMetaProducer priorities.MetadataProducer + predicateMetaProducer predicates.MetadataProducer prioritizers []priorities.PriorityConfig framework framework.Framework extenders []algorithm.SchedulerExtender alwaysCheckAllPredicates bool - nodeInfoSnapshot *schedulernodeinfo.Snapshot + nodeInfoSnapshot *nodeinfosnapshot.Snapshot volumeBinder *volumebinder.VolumeBinder pvcLister corelisters.PersistentVolumeClaimLister - pdbLister algorithm.PDBLister + pdbLister policylisters.PodDisruptionBudgetLister disablePreemption bool percentageOfNodesToScore int32 enableNonPreempting bool + nextStartNodeIndex int } // snapshot snapshots scheduler cache and node infos for all fit and priority @@ -183,41 +174,49 @@ func (g *genericScheduler) snapshot() error { return g.cache.UpdateNodeInfoSnapshot(g.nodeInfoSnapshot) } +// GetPredicateMetadataProducer returns the predicate metadata producer. This is needed +// for cluster autoscaler integration. +func (g *genericScheduler) PredicateMetadataProducer() predicates.MetadataProducer { + return g.predicateMetaProducer +} + // Schedule tries to schedule the given pod to one of the nodes in the node list. // If it succeeds, it will return the name of the node. // If it fails, it will return a FitError error with reasons. -func (g *genericScheduler) Schedule(pod *v1.Pod, pluginContext *framework.PluginContext) (result ScheduleResult, err error) { +func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) { trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name}) defer trace.LogIfLong(100 * time.Millisecond) if err := podPassesBasicChecks(pod, g.pvcLister); err != nil { return result, err } + trace.Step("Basic checks done") - // Run "prefilter" plugins. 
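Schedule and Preempt now take a context.Context and a *framework.CycleState in place of the old *framework.PluginContext, so each scheduling cycle carries its own cancellable context and per-cycle plugin state. A sketch of what a caller supplies, assuming framework.NewCycleState is the CycleState constructor in this tree:

package exampleclient

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/core"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ScheduleOne illustrates the new call shape: the caller provides a context and
// a fresh CycleState for every scheduling cycle; plugins (and the migrated
// predicate metadata) use the state to pass data between extension points.
func ScheduleOne(ctx context.Context, algo core.ScheduleAlgorithm, pod *v1.Pod) (string, error) {
	state := framework.NewCycleState()
	result, err := algo.Schedule(ctx, state, pod)
	if err != nil {
		return "", err
	}
	return result.SuggestedHost, nil
}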
- preFilterStatus := g.framework.RunPreFilterPlugins(pluginContext, pod) - if !preFilterStatus.IsSuccess() { - return result, preFilterStatus.AsError() + if err := g.snapshot(); err != nil { + return result, err } + trace.Step("Snapshoting scheduler cache and node infos done") - numNodes := g.cache.NodeTree().NumNodes() - if numNodes == 0 { + if len(g.nodeInfoSnapshot.NodeInfoList) == 0 { return result, ErrNoNodesAvailable } - if err := g.snapshot(); err != nil { - return result, err + // Run "prefilter" plugins. + preFilterStatus := g.framework.RunPreFilterPlugins(ctx, state, pod) + if !preFilterStatus.IsSuccess() { + return result, preFilterStatus.AsError() } + trace.Step("Running prefilter plugins done") - trace.Step("Basic checks done") startPredicateEvalTime := time.Now() - filteredNodes, failedPredicateMap, filteredNodesStatuses, err := g.findNodesThatFit(pluginContext, pod) + filteredNodes, failedPredicateMap, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod) if err != nil { return result, err } + trace.Step("Computing predicates done") // Run "postfilter" plugins. - postfilterStatus := g.framework.RunPostFilterPlugins(pluginContext, pod, filteredNodes, filteredNodesStatuses) + postfilterStatus := g.framework.RunPostFilterPlugins(ctx, state, pod, filteredNodes, filteredNodesStatuses) if !postfilterStatus.IsSuccess() { return result, postfilterStatus.AsError() } @@ -225,12 +224,12 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, pluginContext *framework.Plugin if len(filteredNodes) == 0 { return result, &FitError{ Pod: pod, - NumAllNodes: numNodes, + NumAllNodes: len(g.nodeInfoSnapshot.NodeInfoList), FailedPredicates: failedPredicateMap, FilteredNodesStatuses: filteredNodesStatuses, } } - trace.Step("Computing predicates done") + trace.Step("Running postfilter plugins done") metrics.SchedulingAlgorithmPredicateEvaluationDuration.Observe(metrics.SinceInSeconds(startPredicateEvalTime)) metrics.DeprecatedSchedulingAlgorithmPredicateEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPredicateEvalTime)) metrics.SchedulingLatency.WithLabelValues(metrics.PredicateEvaluation).Observe(metrics.SinceInSeconds(startPredicateEvalTime)) @@ -243,27 +242,28 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, pluginContext *framework.Plugin metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) return ScheduleResult{ SuggestedHost: filteredNodes[0].Name, - EvaluatedNodes: 1 + len(failedPredicateMap), + EvaluatedNodes: 1 + len(failedPredicateMap) + len(filteredNodesStatuses), FeasibleNodes: 1, }, nil } - metaPrioritiesInterface := g.priorityMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap) - priorityList, err := PrioritizeNodes(pod, g.nodeInfoSnapshot.NodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders, g.framework, pluginContext) + metaPrioritiesInterface := g.priorityMetaProducer(pod, filteredNodes, g.nodeInfoSnapshot) + priorityList, err := g.prioritizeNodes(ctx, state, pod, metaPrioritiesInterface, filteredNodes) if err != nil { return result, err } - trace.Step("Prioritizing done") + metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInSeconds(startPriorityEvalTime)) metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) metrics.SchedulingLatency.WithLabelValues(metrics.PriorityEvaluation).Observe(metrics.SinceInSeconds(startPriorityEvalTime)) 
metrics.DeprecatedSchedulingLatency.WithLabelValues(metrics.PriorityEvaluation).Observe(metrics.SinceInSeconds(startPriorityEvalTime)) host, err := g.selectHost(priorityList) - trace.Step("Selecting host done") + trace.Step("Prioritizing done") + return ScheduleResult{ SuggestedHost: host, - EvaluatedNodes: len(filteredNodes) + len(failedPredicateMap), + EvaluatedNodes: len(filteredNodes) + len(failedPredicateMap) + len(filteredNodesStatuses), FeasibleNodes: len(filteredNodes), }, err } @@ -280,25 +280,29 @@ func (g *genericScheduler) Predicates() map[string]predicates.FitPredicate { return g.predicates } +func (g *genericScheduler) Extenders() []algorithm.SchedulerExtender { + return g.extenders +} + // selectHost takes a prioritized list of nodes and then picks one // in a reservoir sampling manner from the nodes that had the highest score. -func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) { - if len(priorityList) == 0 { +func (g *genericScheduler) selectHost(nodeScoreList framework.NodeScoreList) (string, error) { + if len(nodeScoreList) == 0 { return "", fmt.Errorf("empty priorityList") } - maxScore := priorityList[0].Score - selected := priorityList[0].Host + maxScore := nodeScoreList[0].Score + selected := nodeScoreList[0].Name cntOfMaxScore := 1 - for _, hp := range priorityList[1:] { - if hp.Score > maxScore { - maxScore = hp.Score - selected = hp.Host + for _, ns := range nodeScoreList[1:] { + if ns.Score > maxScore { + maxScore = ns.Score + selected = ns.Name cntOfMaxScore = 1 - } else if hp.Score == maxScore { + } else if ns.Score == maxScore { cntOfMaxScore++ if rand.Intn(cntOfMaxScore) == 0 { // Replace the candidate with probability of 1/cntOfMaxScore - selected = hp.Host + selected = ns.Name } } } @@ -317,7 +321,7 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList // other pods with the same priority. The nominated pod prevents other pods from // using the nominated resources and the nominated pod could take a long time // before it is retried after many other pending pods. -func (g *genericScheduler) Preempt(pod *v1.Pod, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) { +func (g *genericScheduler) Preempt(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) { // Scheduler may return various types of errors. Consider preemption only if // the error is of type FitError. fitError, ok := scheduleErr.(*FitError) @@ -328,22 +332,26 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, scheduleErr error) (*v1.Node, [] klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name) return nil, nil, nil, nil } - allNodes := g.cache.ListNodes() - if len(allNodes) == 0 { + if len(g.nodeInfoSnapshot.NodeInfoMap) == 0 { return nil, nil, nil, ErrNoNodesAvailable } - potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError) + potentialNodes := nodesWherePreemptionMightHelp(g.nodeInfoSnapshot.NodeInfoMap, fitError) if len(potentialNodes) == 0 { klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name) // In this case, we should clean-up any existing nominated node name of the pod. 
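selectHost picks uniformly among all nodes that share the top score without collecting the ties into a separate slice: when the k-th tied candidate is seen, it replaces the current pick with probability 1/k. A self-contained sketch of that single-pass pattern, independent of the scheduler types:

package main

import (
	"fmt"
	"math/rand"
)

// pickMax returns one of the highest-scoring names, chosen uniformly at random
// among ties via the same reservoir-style trick used in selectHost.
func pickMax(names []string, scores []int64) string {
	selected, maxScore, ties := names[0], scores[0], 1
	for i := 1; i < len(names); i++ {
		switch {
		case scores[i] > maxScore:
			maxScore, selected, ties = scores[i], names[i], 1
		case scores[i] == maxScore:
			ties++
			if rand.Intn(ties) == 0 { // replace with probability 1/ties
				selected = names[i]
			}
		}
	}
	return selected
}

func main() {
	counts := map[string]int{}
	for i := 0; i < 3000; i++ {
		counts[pickMax([]string{"a", "b", "c"}, []int64{7, 7, 7})]++
	}
	fmt.Println(counts) // each of a, b, c wins roughly a third of the time
}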
return nil, nil, []*v1.Pod{pod}, nil } - pdbs, err := g.pdbLister.List(labels.Everything()) - if err != nil { - return nil, nil, nil, err + var ( + pdbs []*policy.PodDisruptionBudget + err error + ) + if g.pdbLister != nil { + pdbs, err = g.pdbLister.List(labels.Everything()) + if err != nil { + return nil, nil, nil, err + } } - nodeToVictims, err := selectNodesForPreemption(pod, g.nodeInfoSnapshot.NodeInfoMap, potentialNodes, g.predicates, - g.predicateMetaProducer, g.schedulingQueue, pdbs) + nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, potentialNodes, pdbs) if err != nil { return nil, nil, nil, err } @@ -378,8 +386,8 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, scheduleErr error) (*v1.Node, [] // processPreemptionWithExtenders processes preemption with extenders func (g *genericScheduler) processPreemptionWithExtenders( pod *v1.Pod, - nodeToVictims map[*v1.Node]*schedulerapi.Victims, -) (map[*v1.Node]*schedulerapi.Victims, error) { + nodeToVictims map[*v1.Node]*extenderv1.Victims, +) (map[*v1.Node]*extenderv1.Victims, error) { if len(nodeToVictims) > 0 { for _, extender := range g.extenders { if extender.SupportsPreemption() && extender.IsInterested(pod) { @@ -427,9 +435,9 @@ func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName s } var lowerPriorityPods []*v1.Pod - podPriority := util.GetPodPriority(pod) + podPriority := podutil.GetPodPriority(pod) for _, p := range pods { - if util.GetPodPriority(p) < podPriority { + if podutil.GetPodPriority(p) < podPriority { lowerPriorityPods = append(lowerPriorityPods, p) } } @@ -445,7 +453,8 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i adaptivePercentage := g.percentageOfNodesToScore if adaptivePercentage <= 0 { - adaptivePercentage = schedulerapi.DefaultPercentageOfNodesToScore - numAllNodes/125 + basePercentageOfNodesToScore := int32(50) + adaptivePercentage = basePercentageOfNodesToScore - numAllNodes/125 if adaptivePercentage < minFeasibleNodesPercentageToFind { adaptivePercentage = minFeasibleNodesPercentageToFind } @@ -461,82 +470,77 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i // Filters the nodes to find the ones that fit based on the given predicate functions // Each node is passed through the predicate functions to determine if it is a fit -func (g *genericScheduler) findNodesThatFit(pluginContext *framework.PluginContext, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) { +func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) { var filtered []*v1.Node failedPredicateMap := FailedPredicateMap{} filteredNodesStatuses := framework.NodeToStatusMap{} - if len(g.predicates) == 0 { - filtered = g.cache.ListNodes() + if len(g.predicates) == 0 && !g.framework.HasFilterPlugins() { + filtered = g.nodeInfoSnapshot.ListNodes() } else { - allNodes := int32(g.cache.NodeTree().NumNodes()) - numNodesToFind := g.numFeasibleNodesToFind(allNodes) + allNodes := len(g.nodeInfoSnapshot.NodeInfoList) + numNodesToFind := g.numFeasibleNodesToFind(int32(allNodes)) // Create filtered list with enough space to avoid growing it // and allow assigning. 
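With the schedulerapi.DefaultPercentageOfNodesToScore constant no longer imported here, the fallback percentage is computed inline: start from 50% and shave one point per 125 nodes, never dropping below minFeasibleNodesPercentageToFind (5). A standalone sketch of that arithmetic; the absolute lower bound on the node count lives outside this hunk and is not reproduced:

package main

import "fmt"

const minFeasibleNodesPercentageToFind = 5

// adaptivePercentage mirrors the fallback used above when
// percentageOfNodesToScore is unset (<= 0).
func adaptivePercentage(numAllNodes int32) int32 {
	p := int32(50) - numAllNodes/125
	if p < minFeasibleNodesPercentageToFind {
		p = minFeasibleNodesPercentageToFind
	}
	return p
}

func main() {
	for _, n := range []int32{100, 1000, 5000, 10000} {
		p := adaptivePercentage(n)
		// Filtering can stop after roughly n*p/100 feasible nodes are found.
		fmt.Printf("%5d nodes -> %2d%% -> ~%d feasible nodes\n", n, p, n*p/100)
	}
}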
filtered = make([]*v1.Node, numNodesToFind) - errs := errors.MessageCountMap{} + errCh := util.NewErrorChannel() var ( predicateResultLock sync.Mutex filteredLen int32 ) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) // We can use the same metadata producer for all nodes. - meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap) + meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot) + state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta}) checkNode := func(i int) { - nodeName := g.cache.NodeTree().Next() - - fits, failedPredicates, err := podFitsOnNode( + // We check the nodes starting from where we left off in the previous scheduling cycle, + // this is to make sure all nodes have the same chance of being examined across pods. + nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes] + fits, failedPredicates, status, err := g.podFitsOnNode( + ctx, + state, pod, meta, - g.nodeInfoSnapshot.NodeInfoMap[nodeName], - g.predicates, - g.schedulingQueue, + nodeInfo, g.alwaysCheckAllPredicates, ) if err != nil { - predicateResultLock.Lock() - errs[err.Error()]++ - predicateResultLock.Unlock() + errCh.SendErrorWithCancel(err, cancel) return } if fits { - // Iterate each plugin to verify current node - status := g.framework.RunFilterPlugins(pluginContext, pod, nodeName) - if !status.IsSuccess() { - predicateResultLock.Lock() - filteredNodesStatuses[nodeName] = status - if !status.IsUnschedulable() { - errs[status.Message()]++ - } - predicateResultLock.Unlock() - return - } - length := atomic.AddInt32(&filteredLen, 1) if length > numNodesToFind { cancel() atomic.AddInt32(&filteredLen, -1) } else { - filtered[length-1] = g.nodeInfoSnapshot.NodeInfoMap[nodeName].Node() + filtered[length-1] = nodeInfo.Node() } } else { predicateResultLock.Lock() - failedPredicateMap[nodeName] = failedPredicates + if !status.IsSuccess() { + filteredNodesStatuses[nodeInfo.Node().Name] = status + } + if len(failedPredicates) != 0 { + failedPredicateMap[nodeInfo.Node().Name] = failedPredicates + } predicateResultLock.Unlock() } } // Stops searching for more nodes once the configured number of feasible nodes // are found. - workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode) + workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode) + processedNodes := int(filteredLen) + len(filteredNodesStatuses) + len(failedPredicateMap) + g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes filtered = filtered[:filteredLen] - if len(errs) > 0 { - return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, errors.CreateAggregateFromMessageCountMap(errs) + if err := errCh.ReceiveError(); err != nil { + return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err } } @@ -573,34 +577,42 @@ func (g *genericScheduler) findNodesThatFit(pluginContext *framework.PluginConte // addNominatedPods adds pods with equal or greater priority which are nominated // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether -// any pod was added, 2) augmented meta data, 3) augmented nodeInfo. 
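Because filtering can stop as soon as numNodesToFind feasible nodes are found, the snapshot's NodeInfoList is now walked starting at nextStartNodeIndex and the offset advances by the number of nodes actually processed, so successive pods examine different parts of the cluster rather than always revisiting the first nodes. A tiny sketch of that rotation:

package main

import "fmt"

// nextWindow returns the indices examined in one cycle and the updated start
// offset, mirroring the (nextStartNodeIndex+i)%allNodes indexing used above.
func nextWindow(n, start, processed int) ([]int, int) {
	idx := make([]int, 0, processed)
	for i := 0; i < processed; i++ {
		idx = append(idx, (start+i)%n)
	}
	return idx, (start + processed) % n
}

func main() {
	n, start := 10, 0
	for cycle := 0; cycle < 4; cycle++ {
		var window []int
		// Pretend each cycle stops after processing 4 of the 10 nodes.
		window, start = nextWindow(n, start, 4)
		fmt.Println("cycle", cycle, "examined node indices", window)
	}
	// Cycles cover 0-3, 4-7, 8-9 plus 0-1, then 2-5: every node gets its turn.
}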
-func addNominatedPods(pod *v1.Pod, meta predicates.PredicateMetadata, - nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata, - *schedulernodeinfo.NodeInfo) { - if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil { +// any pod was added, 2) augmented metadata, 3) augmented CycleState 4) augmented nodeInfo. +func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.Metadata, state *framework.CycleState, + nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.Metadata, + *framework.CycleState, *schedulernodeinfo.NodeInfo, error) { + if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil { // This may happen only in tests. - return false, meta, nodeInfo + return false, meta, state, nodeInfo, nil } - nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name) - if nominatedPods == nil || len(nominatedPods) == 0 { - return false, meta, nodeInfo + nominatedPods := g.schedulingQueue.NominatedPodsForNode(nodeInfo.Node().Name) + if len(nominatedPods) == 0 { + return false, meta, state, nodeInfo, nil } - var metaOut predicates.PredicateMetadata + nodeInfoOut := nodeInfo.Clone() + var metaOut predicates.Metadata if meta != nil { metaOut = meta.ShallowCopy() } - nodeInfoOut := nodeInfo.Clone() + stateOut := state.Clone() + stateOut.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: metaOut}) podsAdded := false for _, p := range nominatedPods { - if util.GetPodPriority(p) >= util.GetPodPriority(pod) && p.UID != pod.UID { + if podutil.GetPodPriority(p) >= podutil.GetPodPriority(pod) && p.UID != pod.UID { nodeInfoOut.AddPod(p) if metaOut != nil { - metaOut.AddPod(p, nodeInfoOut) + if err := metaOut.AddPod(p, nodeInfoOut.Node()); err != nil { + return false, meta, state, nodeInfo, err + } + } + status := g.framework.RunPreFilterExtensionAddPod(ctx, stateOut, pod, p, nodeInfoOut) + if !status.IsSuccess() { + return false, meta, state, nodeInfo, status.AsError() } podsAdded = true } } - return podsAdded, metaOut, nodeInfoOut + return podsAdded, metaOut, stateOut, nodeInfoOut, nil } // podFitsOnNode checks whether a node given by NodeInfo satisfies the given predicate functions. @@ -613,15 +625,16 @@ func addNominatedPods(pod *v1.Pod, meta predicates.PredicateMetadata, // When it is called from Preempt, we should remove the victims of preemption and // add the nominated pods. Removal of the victims is done by SelectVictimsOnNode(). // It removes victims from meta and NodeInfo before calling this function. -func podFitsOnNode( +func (g *genericScheduler) podFitsOnNode( + ctx context.Context, + state *framework.CycleState, pod *v1.Pod, - meta predicates.PredicateMetadata, + meta predicates.Metadata, info *schedulernodeinfo.NodeInfo, - predicateFuncs map[string]predicates.FitPredicate, - queue internalqueue.SchedulingQueue, alwaysCheckAllPredicates bool, -) (bool, []predicates.PredicateFailureReason, error) { +) (bool, []predicates.PredicateFailureReason, *framework.Status, error) { var failedPredicates []predicates.PredicateFailureReason + var status *framework.Status podsAdded := false // We run predicates twice in some cases. If the node has greater or equal priority @@ -644,23 +657,29 @@ func podFitsOnNode( // they may end up getting scheduled to a different node. 
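The comments above explain why fitness is checked up to twice per node: the first pass adds nominated pods of equal or higher priority, and the second pass runs without them, skipped entirely when nothing was added or the first pass already failed. A minimal sketch of that control flow; the callback signature is invented for illustration:

package sketch

// fitsConsideringNominatedPods runs the fit check up to twice, once with the
// higher- and equal-priority nominated pods added and once without, as the
// comments above describe. checkFit stands in for the predicate/filter run.
func fitsConsideringNominatedPods(hasNominatedPods bool, checkFit func(withNominatedPods bool) bool) bool {
	for i := 0; i < 2; i++ {
		if i == 1 && !hasNominatedPods {
			// Nothing was added in the first pass, so a second pass would be identical.
			break
		}
		if !checkFit(i == 0) {
			return false
		}
	}
	return true
}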
for i := 0; i < 2; i++ { metaToUse := meta + stateToUse := state nodeInfoToUse := info if i == 0 { - podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(pod, meta, info, queue) - } else if !podsAdded || len(failedPredicates) != 0 { + var err error + podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info) + if err != nil { + return false, []predicates.PredicateFailureReason{}, nil, err + } + } else if !podsAdded || len(failedPredicates) != 0 || !status.IsSuccess() { break } + for _, predicateKey := range predicates.Ordering() { var ( fit bool reasons []predicates.PredicateFailureReason err error ) - //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric - if predicate, exist := predicateFuncs[predicateKey]; exist { + + if predicate, exist := g.predicates[predicateKey]; exist { fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) if err != nil { - return false, []predicates.PredicateFailureReason{}, err + return false, []predicates.PredicateFailureReason{}, nil, err } if !fit { @@ -676,36 +695,38 @@ func podFitsOnNode( } } } + + status = g.framework.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse) + if !status.IsSuccess() && !status.IsUnschedulable() { + return false, failedPredicates, status, status.AsError() + } } - return len(failedPredicates) == 0, failedPredicates, nil + return len(failedPredicates) == 0 && status.IsSuccess(), failedPredicates, status, nil } -// PrioritizeNodes prioritizes the nodes by running the individual priority functions in parallel. +// prioritizeNodes prioritizes the nodes by running the individual priority functions in parallel. // Each priority function is expected to set a score of 0-10 // 0 is the lowest priority score (least preferred node) and 10 is the highest // Each priority function can also have its own weight // The node scores returned by the priority function are multiplied by the weights to get weighted scores // All scores are finally combined (added) to get the total weighted scores of all nodes -func PrioritizeNodes( +func (g *genericScheduler) prioritizeNodes( + ctx context.Context, + state *framework.CycleState, pod *v1.Pod, - nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, meta interface{}, - priorityConfigs []priorities.PriorityConfig, nodes []*v1.Node, - extenders []algorithm.SchedulerExtender, - framework framework.Framework, - pluginContext *framework.PluginContext) (schedulerapi.HostPriorityList, error) { - // If no priority configs are provided, then the EqualPriority function is applied +) (framework.NodeScoreList, error) { + // If no priority configs are provided, then all nodes will have a score of one. 
// This is required to generate the priority list in the required format - if len(priorityConfigs) == 0 && len(extenders) == 0 { - result := make(schedulerapi.HostPriorityList, 0, len(nodes)) + if len(g.prioritizers) == 0 && len(g.extenders) == 0 && !g.framework.HasScorePlugins() { + result := make(framework.NodeScoreList, 0, len(nodes)) for i := range nodes { - hostPriority, err := EqualPriorityMap(pod, meta, nodeNameToInfo[nodes[i].Name]) - if err != nil { - return nil, err - } - result = append(result, hostPriority) + result = append(result, framework.NodeScore{ + Name: nodes[i].Name, + Score: 1, + }) } return result, nil } @@ -721,55 +742,41 @@ func PrioritizeNodes( errs = append(errs, err) } - results := make([]schedulerapi.HostPriorityList, len(priorityConfigs), len(priorityConfigs)) + results := make([]framework.NodeScoreList, len(g.prioritizers)) - // DEPRECATED: we can remove this when all priorityConfigs implement the - // Map-Reduce pattern. - for i := range priorityConfigs { - if priorityConfigs[i].Function != nil { - wg.Add(1) - go func(index int) { - defer wg.Done() - var err error - results[index], err = priorityConfigs[index].Function(pod, nodeNameToInfo, nodes) - if err != nil { - appendError(err) - } - }(i) - } else { - results[i] = make(schedulerapi.HostPriorityList, len(nodes)) - } + for i := range g.prioritizers { + results[i] = make(framework.NodeScoreList, len(nodes)) } workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) { - nodeInfo := nodeNameToInfo[nodes[index].Name] - for i := range priorityConfigs { - if priorityConfigs[i].Function != nil { - continue - } - + nodeInfo := g.nodeInfoSnapshot.NodeInfoMap[nodes[index].Name] + for i := range g.prioritizers { var err error - results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo) + results[i][index], err = g.prioritizers[i].Map(pod, meta, nodeInfo) if err != nil { appendError(err) - results[i][index].Host = nodes[index].Name + results[i][index].Name = nodes[index].Name } } }) - for i := range priorityConfigs { - if priorityConfigs[i].Reduce == nil { + for i := range g.prioritizers { + if g.prioritizers[i].Reduce == nil { continue } wg.Add(1) go func(index int) { - defer wg.Done() - if err := priorityConfigs[index].Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { + metrics.SchedulerGoroutines.WithLabelValues("prioritizing_mapreduce").Inc() + defer func() { + metrics.SchedulerGoroutines.WithLabelValues("prioritizing_mapreduce").Dec() + wg.Done() + }() + if err := g.prioritizers[index].Reduce(pod, meta, g.nodeInfoSnapshot, results[index]); err != nil { appendError(err) } if klog.V(10) { for _, hostPriority := range results[index] { - klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Name, g.prioritizers[index].Name, hostPriority.Score) } } }(i) @@ -777,22 +784,23 @@ func PrioritizeNodes( // Wait for all computations to be finished. wg.Wait() if len(errs) != 0 { - return schedulerapi.HostPriorityList{}, errors.NewAggregate(errs) + return framework.NodeScoreList{}, errors.NewAggregate(errs) } // Run the Score plugins. 
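prioritizeNodes above scores every node with every prioritizer's Map function in parallel, then runs each Reduce function once per prioritizer, and finally combines the weighted results (Score-plugin and extender scores are folded in afterwards). A compact sketch of that shape, with plain functions standing in for the priority configs:

package sketch

import "sync"

// Prioritizer is an illustrative stand-in for the Map/Reduce priority configs above.
type Prioritizer struct {
	Map    func(node string) int64
	Reduce func(scores []int64) // e.g. normalize raw scores to a fixed range
	Weight int64
}

// prioritize scores every node with every prioritizer and sums the weighted
// results, mirroring the structure of the hunk above.
func prioritize(nodes []string, prioritizers []Prioritizer) []int64 {
	results := make([][]int64, len(prioritizers))
	for i := range prioritizers {
		results[i] = make([]int64, len(nodes))
	}

	// Map phase: one goroutine per node here; the real code bounds this with
	// workqueue.ParallelizeUntil and 16 workers.
	var wg sync.WaitGroup
	for idx := range nodes {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			for i := range prioritizers {
				results[i][n] = prioritizers[i].Map(nodes[n])
			}
		}(idx)
	}
	wg.Wait()

	// Reduce phase: per prioritizer, across all nodes.
	for i := range prioritizers {
		if prioritizers[i].Reduce != nil {
			prioritizers[i].Reduce(results[i])
		}
	}

	// Weighted combination.
	total := make([]int64, len(nodes))
	for n := range nodes {
		for i := range prioritizers {
			total[n] += results[i][n] * prioritizers[i].Weight
		}
	}
	return total
}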
- scoresMap, scoreStatus := framework.RunScorePlugins(pluginContext, pod, nodes) + state.Write(migration.PrioritiesStateKey, &migration.PrioritiesStateData{Reference: meta}) + scoresMap, scoreStatus := g.framework.RunScorePlugins(ctx, state, pod, nodes) if !scoreStatus.IsSuccess() { - return schedulerapi.HostPriorityList{}, scoreStatus.AsError() + return framework.NodeScoreList{}, scoreStatus.AsError() } // Summarize all scores. - result := make(schedulerapi.HostPriorityList, 0, len(nodes)) + result := make(framework.NodeScoreList, 0, len(nodes)) for i := range nodes { - result = append(result, schedulerapi.HostPriority{Host: nodes[i].Name, Score: 0}) - for j := range priorityConfigs { - result[i].Score += results[j][i].Score * priorityConfigs[j].Weight + result = append(result, framework.NodeScore{Name: nodes[i].Name, Score: 0}) + for j := range g.prioritizers { + result[i].Score += results[j][i].Score * g.prioritizers[j].Weight } for j := range scoresMap { @@ -800,16 +808,20 @@ func PrioritizeNodes( } } - if len(extenders) != 0 && nodes != nil { - combinedScores := make(map[string]int, len(nodeNameToInfo)) - for i := range extenders { - if !extenders[i].IsInterested(pod) { + if len(g.extenders) != 0 && nodes != nil { + combinedScores := make(map[string]int64, len(g.nodeInfoSnapshot.NodeInfoList)) + for i := range g.extenders { + if !g.extenders[i].IsInterested(pod) { continue } wg.Add(1) go func(extIndex int) { - defer wg.Done() - prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes) + metrics.SchedulerGoroutines.WithLabelValues("prioritizing_extender").Inc() + defer func() { + metrics.SchedulerGoroutines.WithLabelValues("prioritizing_extender").Dec() + wg.Done() + }() + prioritizedList, weight, err := g.extenders[extIndex].Prioritize(pod, nodes) if err != nil { // Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities return @@ -818,7 +830,7 @@ func PrioritizeNodes( for i := range *prioritizedList { host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score if klog.V(10) { - klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, g.extenders[extIndex].Name(), score) } combinedScores[host] += score * weight } @@ -828,30 +840,20 @@ func PrioritizeNodes( // wait for all go routines to finish wg.Wait() for i := range result { - result[i].Score += combinedScores[result[i].Host] + // MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore, + // therefore we need to scale the score returned by extenders to the score range used by the scheduler. + result[i].Score += combinedScores[result[i].Name] * (framework.MaxNodeScore / extenderv1.MaxExtenderPriority) } } if klog.V(10) { for i := range result { - klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score) + klog.Infof("Host %s => Score %d", result[i].Name, result[i].Score) } } return result, nil } -// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes -func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { - node := nodeInfo.Node() - if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") - } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: 1, - }, nil -} - // pickOneNodeForPreemption chooses one node among the given nodes. 
It assumes // pods in each map entry are ordered by decreasing priority. // It picks a node based on the following criteria: @@ -863,11 +865,11 @@ func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.Node // 6. If there are still ties, the first such node is picked (sort of randomly). // The 'minNodes1' and 'minNodes2' are being reused here to save the memory // allocation and garbage collection time. -func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) *v1.Node { +func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*extenderv1.Victims) *v1.Node { if len(nodesToVictims) == 0 { return nil } - minNumPDBViolatingPods := math.MaxInt32 + minNumPDBViolatingPods := int64(math.MaxInt32) var minNodes1 []*v1.Node lenNodes1 := 0 for node, victims := range nodesToVictims { @@ -902,7 +904,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) node := minNodes1[i] victims := nodesToVictims[node] // highestPodPriority is the highest priority among the victims on this node. - highestPodPriority := util.GetPodPriority(victims.Pods[0]) + highestPodPriority := podutil.GetPodPriority(victims.Pods[0]) if highestPodPriority < minHighestPriority { minHighestPriority = highestPodPriority lenNodes2 = 0 @@ -928,7 +930,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) // needed so that a node with a few pods with negative priority is not // picked over a node with a smaller number of pods with the same negative // priority (and similar scenarios). - sumPriorities += int64(util.GetPodPriority(pod)) + int64(math.MaxInt32+1) + sumPriorities += int64(podutil.GetPodPriority(pod)) + int64(math.MaxInt32+1) } if sumPriorities < minSumPriorities { minSumPriorities = sumPriorities @@ -992,31 +994,36 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) // selectNodesForPreemption finds all the nodes with possible victims for // preemption in parallel. -func selectNodesForPreemption(pod *v1.Pod, - nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, +func (g *genericScheduler) selectNodesForPreemption( + ctx context.Context, + state *framework.CycleState, + pod *v1.Pod, potentialNodes []*v1.Node, - fitPredicates map[string]predicates.FitPredicate, - metadataProducer predicates.PredicateMetadataProducer, - queue internalqueue.SchedulingQueue, pdbs []*policy.PodDisruptionBudget, -) (map[*v1.Node]*schedulerapi.Victims, error) { - nodeToVictims := map[*v1.Node]*schedulerapi.Victims{} +) (map[*v1.Node]*extenderv1.Victims, error) { + nodeToVictims := map[*v1.Node]*extenderv1.Victims{} var resultLock sync.Mutex // We can use the same metadata producer for all nodes. 
- meta := metadataProducer(pod, nodeNameToInfo) + meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot) checkNode := func(i int) { nodeName := potentialNodes[i].Name - var metaCopy predicates.PredicateMetadata + if g.nodeInfoSnapshot.NodeInfoMap[nodeName] == nil { + return + } + nodeInfoCopy := g.nodeInfoSnapshot.NodeInfoMap[nodeName].Clone() + var metaCopy predicates.Metadata if meta != nil { metaCopy = meta.ShallowCopy() } - pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], fitPredicates, queue, pdbs) + stateCopy := state.Clone() + stateCopy.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: metaCopy}) + pods, numPDBViolations, fits := g.selectVictimsOnNode(ctx, stateCopy, pod, metaCopy, nodeInfoCopy, pdbs) if fits { resultLock.Lock() - victims := schedulerapi.Victims{ + victims := extenderv1.Victims{ Pods: pods, - NumPDBViolations: numPDBViolations, + NumPDBViolations: int64(numPDBViolations), } nodeToVictims[potentialNodes[i]] = &victims resultLock.Unlock() @@ -1031,9 +1038,9 @@ func selectNodesForPreemption(pod *v1.Pod, // preempted. // This function is stable and does not change the order of received pods. So, if it // receives a sorted list, grouping will preserve the order of the input list. -func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruptionBudget) (violatingPods, nonViolatingPods []*v1.Pod) { +func filterPodsWithPDBViolation(pods []*v1.Pod, pdbs []*policy.PodDisruptionBudget) (violatingPods, nonViolatingPods []*v1.Pod) { for _, obj := range pods { - pod := obj.(*v1.Pod) + pod := obj pdbForPodIsViolated := false // A pod with no labels will not match any PDB. So, no need to check. if len(pod.Labels) != 0 { @@ -1080,39 +1087,53 @@ func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruption // NOTE: This function assumes that it is never called if "pod" cannot be scheduled // due to pod affinity, node affinity, or node anti-affinity reasons. None of // these predicates can be satisfied by removing more pods from the node. 
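filterPodsWithPDBViolation now takes []*v1.Pod directly, and the property its comment stresses is a stable partition: both output groups keep the order of the already-sorted input. A small sketch of that step; the Pod type and the violates predicate are minimal stand-ins for the real PDB selector and disruption check:

package sketch

// Pod is a minimal stand-in for *v1.Pod in this illustration.
type Pod struct {
	Name   string
	Labels map[string]string
}

// partitionByPDBViolation splits candidate victims into PDB-violating and
// non-violating groups while preserving the input order within each group,
// which matters because the input is already sorted by importance.
func partitionByPDBViolation(victims []*Pod, violates func(*Pod) bool) (violating, nonViolating []*Pod) {
	for _, p := range victims {
		if violates(p) {
			violating = append(violating, p)
		} else {
			nonViolating = append(nonViolating, p)
		}
	}
	return violating, nonViolating
}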
-func selectVictimsOnNode( +func (g *genericScheduler) selectVictimsOnNode( + ctx context.Context, + state *framework.CycleState, pod *v1.Pod, - meta predicates.PredicateMetadata, + meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo, - fitPredicates map[string]predicates.FitPredicate, - queue internalqueue.SchedulingQueue, pdbs []*policy.PodDisruptionBudget, ) ([]*v1.Pod, int, bool) { - if nodeInfo == nil { - return nil, 0, false - } - potentialVictims := util.SortableList{CompFunc: util.MoreImportantPod} - nodeInfoCopy := nodeInfo.Clone() + var potentialVictims []*v1.Pod - removePod := func(rp *v1.Pod) { - nodeInfoCopy.RemovePod(rp) + removePod := func(rp *v1.Pod) error { + if err := nodeInfo.RemovePod(rp); err != nil { + return err + } if meta != nil { - meta.RemovePod(rp, nodeInfoCopy.Node()) + if err := meta.RemovePod(rp, nodeInfo.Node()); err != nil { + return err + } + } + status := g.framework.RunPreFilterExtensionRemovePod(ctx, state, pod, rp, nodeInfo) + if !status.IsSuccess() { + return status.AsError() } + return nil } - addPod := func(ap *v1.Pod) { - nodeInfoCopy.AddPod(ap) + addPod := func(ap *v1.Pod) error { + nodeInfo.AddPod(ap) if meta != nil { - meta.AddPod(ap, nodeInfoCopy) + if err := meta.AddPod(ap, nodeInfo.Node()); err != nil { + return err + } + } + status := g.framework.RunPreFilterExtensionAddPod(ctx, state, pod, ap, nodeInfo) + if !status.IsSuccess() { + return status.AsError() } + return nil } // As the first step, remove all the lower priority pods from the node and // check if the given pod can be scheduled. - podPriority := util.GetPodPriority(pod) - for _, p := range nodeInfoCopy.Pods() { - if util.GetPodPriority(p) < podPriority { - potentialVictims.Items = append(potentialVictims.Items, p) - removePod(p) + podPriority := podutil.GetPodPriority(pod) + for _, p := range nodeInfo.Pods() { + if podutil.GetPodPriority(p) < podPriority { + potentialVictims = append(potentialVictims, p) + if err := removePod(p); err != nil { + return nil, 0, false + } } } // If the new pod does not fit after removing all the lower priority pods, @@ -1121,68 +1142,70 @@ func selectVictimsOnNode( // inter-pod affinity to one or more victims, but we have decided not to // support this case for performance reasons. Having affinity to lower // priority pods is not a recommended configuration anyway. - if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, queue, false); !fits { + if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false); !fits { if err != nil { klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } + return nil, 0, false } var victims []*v1.Pod numViolatingVictim := 0 - potentialVictims.Sort() + sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i], potentialVictims[j]) }) // Try to reprieve as many pods as possible. We first try to reprieve the PDB // violating victims and then other non-violating ones. In both cases, we start // from the highest priority victims. 
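The reprieve step described above re-adds candidate victims one at a time, from the highest-priority ones down, and keeps a pod as a victim only if the preemptor stops fitting once that pod is back on the node. A sketch of that loop, reusing the minimal Pod stand-in from the previous sketch; addPod, removePod and fits stand in for the callbacks in the hunk:

package sketch

// reprieveVictims walks candidates from highest to lowest priority; a pod stays
// a victim only when re-adding it makes the preemptor unschedulable again.
func reprieveVictims(
	candidates []*Pod,
	addPod func(*Pod) error,
	removePod func(*Pod) error,
	fits func() bool,
) (victims []*Pod, err error) {
	for _, p := range candidates {
		if err := addPod(p); err != nil {
			return nil, err
		}
		if !fits() {
			// Re-adding p breaks the fit, so p must really be evicted.
			if err := removePod(p); err != nil {
				return nil, err
			}
			victims = append(victims, p)
		}
	}
	return victims, nil
}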
- violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) - reprievePod := func(p *v1.Pod) bool { - addPod(p) - fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, queue, false) + violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims, pdbs) + reprievePod := func(p *v1.Pod) (bool, error) { + if err := addPod(p); err != nil { + return false, err + } + fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false) if !fits { - removePod(p) + if err := removePod(p); err != nil { + return false, err + } victims = append(victims, p) klog.V(5).Infof("Pod %v/%v is a potential preemption victim on node %v.", p.Namespace, p.Name, nodeInfo.Node().Name) } - return fits + return fits, nil } for _, p := range violatingVictims { - if !reprievePod(p) { + if fits, err := reprievePod(p); err != nil { + klog.Warningf("Failed to reprieve pod %q: %v", p.Name, err) + return nil, 0, false + } else if !fits { numViolatingVictim++ } } // Now we try to reprieve non-violating victims. for _, p := range nonViolatingVictims { - reprievePod(p) - } - return victims, numViolatingVictim, true -} - -// unresolvablePredicateExists checks whether failedPredicates has unresolvable predicate. -func unresolvablePredicateExists(failedPredicates []predicates.PredicateFailureReason) bool { - for _, failedPredicate := range failedPredicates { - if _, ok := unresolvablePredicateFailureErrors[failedPredicate]; ok { - return true + if _, err := reprievePod(p); err != nil { + klog.Warningf("Failed to reprieve pod %q: %v", p.Name, err) + return nil, 0, false } } - return false + return victims, numViolatingVictim, true } // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates // that may be satisfied by removing pods from the node. -func nodesWherePreemptionMightHelp(nodes []*v1.Node, fitErr *FitError) []*v1.Node { +func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, fitErr *FitError) []*v1.Node { potentialNodes := []*v1.Node{} - for _, node := range nodes { - if fitErr.FilteredNodesStatuses[node.Name].Code() == framework.UnschedulableAndUnresolvable { + for name, node := range nodeNameToInfo { + if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable { continue } - failedPredicates, _ := fitErr.FailedPredicates[node.Name] + failedPredicates := fitErr.FailedPredicates[name] + // If we assume that scheduler looks at all nodes and populates the failedPredicateMap // (which is the case today), the !found case should never happen, but we'd prefer // to rely less on such assumptions in the code when checking does not impose // significant overhead. // Also, we currently assume all failures returned by extender as resolvable. 
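nodesWherePreemptionMightHelp now iterates the NodeInfo snapshot and skips nodes whose filter status is UnschedulableAndUnresolvable or whose failed predicates are known to be unresolvable, since evicting pods cannot fix those failures. A simplified sketch of that screening, with the status and reason checks reduced to a single flag:

package sketch

// nodeFailure is an illustrative summary of why a node rejected the pod.
type nodeFailure struct {
	// unresolvable is true when no amount of preemption can help, e.g. the
	// failure came from node affinity/selector mismatch.
	unresolvable bool
}

// preemptionCandidates keeps only the nodes whose failures might be resolved by
// evicting lower-priority pods, mirroring nodesWherePreemptionMightHelp above.
func preemptionCandidates(failures map[string]nodeFailure) []string {
	var candidates []string
	for name, f := range failures {
		if f.unresolvable {
			continue
		}
		candidates = append(candidates, name)
	}
	return candidates
}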
- if !unresolvablePredicateExists(failedPredicates) { - klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name) - potentialNodes = append(potentialNodes, node) + if predicates.UnresolvablePredicateExists(failedPredicates) == nil { + klog.V(3).Infof("Node %v is a potential node for preemption.", name) + potentialNodes = append(potentialNodes, node.Node()) } } return potentialNodes @@ -1202,9 +1225,9 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedule nomNodeName := pod.Status.NominatedNodeName if len(nomNodeName) > 0 { if nodeInfo, found := nodeNameToInfo[nomNodeName]; found { - podPriority := util.GetPodPriority(pod) + podPriority := podutil.GetPodPriority(pod) for _, p := range nodeInfo.Pods() { - if p.DeletionTimestamp != nil && util.GetPodPriority(p) < podPriority { + if p.DeletionTimestamp != nil && podutil.GetPodPriority(p) < podPriority { // There is a terminating pod on the nominated node. return false } @@ -1245,19 +1268,19 @@ func NewGenericScheduler( cache internalcache.Cache, podQueue internalqueue.SchedulingQueue, predicates map[string]predicates.FitPredicate, - predicateMetaProducer predicates.PredicateMetadataProducer, + predicateMetaProducer predicates.MetadataProducer, prioritizers []priorities.PriorityConfig, - priorityMetaProducer priorities.PriorityMetadataProducer, + priorityMetaProducer priorities.MetadataProducer, + nodeInfoSnapshot *nodeinfosnapshot.Snapshot, framework framework.Framework, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, - pdbLister algorithm.PDBLister, + pdbLister policylisters.PodDisruptionBudgetLister, alwaysCheckAllPredicates bool, disablePreemption bool, percentageOfNodesToScore int32, - enableNonPreempting bool, -) ScheduleAlgorithm { + enableNonPreempting bool) ScheduleAlgorithm { return &genericScheduler{ cache: cache, schedulingQueue: podQueue, @@ -1267,7 +1290,7 @@ func NewGenericScheduler( priorityMetaProducer: priorityMetaProducer, framework: framework, extenders: extenders, - nodeInfoSnapshot: framework.NodeInfoSnapshot(), + nodeInfoSnapshot: nodeInfoSnapshot, volumeBinder: volumeBinder, pvcLister: pvcLister, pdbLister: pdbLister, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go index 53462e221ba..95842ffaf25 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go @@ -24,14 +24,13 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" - storageinformersv1 "k8s.io/client-go/informers/storage/v1" - storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler/internal/queue" ) func (sched *Scheduler) onPvAdd(obj interface{}) { @@ -41,7 +40,7 @@ func (sched *Scheduler) onPvAdd(obj interface{}) { // provisioning and binding process, will not trigger events to schedule pod // again. So we need to move pods to active queue on PV add for this // scenario. 
- sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.PvAdd) } func (sched *Scheduler) onPvUpdate(old, new interface{}) { @@ -49,15 +48,15 @@ func (sched *Scheduler) onPvUpdate(old, new interface{}) { // bindings due to conflicts if PVs are updated by PV controller or other // parties, then scheduler will add pod back to unschedulable queue. We // need to move pods to active queue on PV update for this scenario. - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.PvUpdate) } func (sched *Scheduler) onPvcAdd(obj interface{}) { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.PvcAdd) } func (sched *Scheduler) onPvcUpdate(old, new interface{}) { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.PvcUpdate) } func (sched *Scheduler) onStorageClassAdd(obj interface{}) { @@ -74,20 +73,20 @@ func (sched *Scheduler) onStorageClassAdd(obj interface{}) { // We don't need to invalidate cached results because results will not be // cached for pod that has unbound immediate PVCs. if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.StorageClassAdd) } } func (sched *Scheduler) onServiceAdd(obj interface{}) { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.ServiceAdd) } func (sched *Scheduler) onServiceUpdate(oldObj interface{}, newObj interface{}) { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.ServiceUpdate) } func (sched *Scheduler) onServiceDelete(obj interface{}) { - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.ServiceDelete) } func (sched *Scheduler) addNodeToCache(obj interface{}) { @@ -101,7 +100,7 @@ func (sched *Scheduler) addNodeToCache(obj interface{}) { klog.Errorf("scheduler cache AddNode failed: %v", err) } - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.NodeAdd) } func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) { @@ -125,8 +124,10 @@ func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) { // to save processing cycles. We still trigger a move to active queue to cover the case // that a pod being processed by the scheduler is determined unschedulable. We want this // pod to be reevaluated when a change in the cluster happens. 
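Every callback in this file now reports a named cluster event to MoveAllToActiveOrBackoffQueue instead of blindly flushing the queue. For the node-update path described above, the decision has roughly the following shape; the event labels and the moveAll callback are illustrative stand-ins for the queue API:

package sketch

// onNodeUpdate mirrors the updated updateNodeInCache logic: when nothing is
// waiting, still flush so an in-flight pod gets re-queued promptly if it turns
// out to be unschedulable; otherwise flush only when a scheduling-relevant
// node property actually changed.
func onNodeUpdate(numUnschedulablePods int, changeEvent string, moveAll func(event string)) {
	if numUnschedulablePods == 0 {
		moveAll("Unknown") // illustrative label; the real code uses queue.Unknown
		return
	}
	if changeEvent != "" {
		moveAll(changeEvent) // e.g. "NodeAllocatableChange", "NodeTaintChange"
	}
}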
- if sched.SchedulingQueue.NumUnschedulablePods() == 0 || nodeSchedulingPropertiesChanged(newNode, oldNode) { - sched.SchedulingQueue.MoveAllToActiveQueue() + if sched.SchedulingQueue.NumUnschedulablePods() == 0 { + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.Unknown) + } else if event := nodeSchedulingPropertiesChange(newNode, oldNode); event != "" { + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(event) } } @@ -157,59 +158,11 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) { } func (sched *Scheduler) onCSINodeAdd(obj interface{}) { - csiNode, ok := obj.(*storagev1beta1.CSINode) - if !ok { - klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", obj) - return - } - - if err := sched.SchedulerCache.AddCSINode(csiNode); err != nil { - klog.Errorf("scheduler cache AddCSINode failed: %v", err) - } - - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.CSINodeAdd) } func (sched *Scheduler) onCSINodeUpdate(oldObj, newObj interface{}) { - oldCSINode, ok := oldObj.(*storagev1beta1.CSINode) - if !ok { - klog.Errorf("cannot convert oldObj to *storagev1beta1.CSINode: %v", oldObj) - return - } - - newCSINode, ok := newObj.(*storagev1beta1.CSINode) - if !ok { - klog.Errorf("cannot convert newObj to *storagev1beta1.CSINode: %v", newObj) - return - } - - if err := sched.SchedulerCache.UpdateCSINode(oldCSINode, newCSINode); err != nil { - klog.Errorf("scheduler cache UpdateCSINode failed: %v", err) - } - - sched.SchedulingQueue.MoveAllToActiveQueue() -} - -func (sched *Scheduler) onCSINodeDelete(obj interface{}) { - var csiNode *storagev1beta1.CSINode - switch t := obj.(type) { - case *storagev1beta1.CSINode: - csiNode = t - case cache.DeletedFinalStateUnknown: - var ok bool - csiNode, ok = t.Obj.(*storagev1beta1.CSINode) - if !ok { - klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", t.Obj) - return - } - default: - klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", t) - return - } - - if err := sched.SchedulerCache.RemoveCSINode(csiNode); err != nil { - klog.Errorf("scheduler cache RemoveCSINode failed: %v", err) - } + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.CSINodeUpdate) } func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) { @@ -251,6 +204,7 @@ func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) { // Volume binder only wants to keep unassigned pods sched.VolumeBinder.DeletePodBindings(pod) } + sched.Framework.RejectWaitingPod(pod.UID) } func (sched *Scheduler) addPodToCache(obj interface{}) { @@ -316,7 +270,7 @@ func (sched *Scheduler) deletePodFromCache(obj interface{}) { klog.Errorf("scheduler cache RemovePod failed: %v", err) } - sched.SchedulingQueue.MoveAllToActiveQueue() + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.AssignedPodDelete) } // assignedPod selects pods that are assigned (scheduled and running). 
@@ -381,13 +335,8 @@ func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool { func AddAllEventHandlers( sched *Scheduler, schedulerName string, - nodeInformer coreinformers.NodeInformer, + informerFactory informers.SharedInformerFactory, podInformer coreinformers.PodInformer, - pvInformer coreinformers.PersistentVolumeInformer, - pvcInformer coreinformers.PersistentVolumeClaimInformer, - serviceInformer coreinformers.ServiceInformer, - storageClassInformer storageinformersv1.StorageClassInformer, - csiNodeInformer storageinformersv1beta1.CSINodeInformer, ) { // scheduled pod cache podInformer.Informer().AddEventHandler( @@ -440,7 +389,7 @@ func AddAllEventHandlers( }, ) - nodeInformer.Informer().AddEventHandler( + informerFactory.Core().V1().Nodes().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: sched.addNodeToCache, UpdateFunc: sched.updateNodeInCache, @@ -449,18 +398,17 @@ func AddAllEventHandlers( ) if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { - csiNodeInformer.Informer().AddEventHandler( + informerFactory.Storage().V1().CSINodes().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: sched.onCSINodeAdd, UpdateFunc: sched.onCSINodeUpdate, - DeleteFunc: sched.onCSINodeDelete, }, ) } // On add and delete of PVs, it will affect equivalence cache items // related to persistent volume - pvInformer.Informer().AddEventHandler( + informerFactory.Core().V1().PersistentVolumes().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ // MaxPDVolumeCountPredicate: since it relies on the counts of PV. AddFunc: sched.onPvAdd, @@ -469,7 +417,7 @@ func AddAllEventHandlers( ) // This is for MaxPDVolumeCountPredicate: add/delete PVC will affect counts of PV when it is bound. - pvcInformer.Informer().AddEventHandler( + informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: sched.onPvcAdd, UpdateFunc: sched.onPvcUpdate, @@ -479,7 +427,7 @@ func AddAllEventHandlers( // This is for ServiceAffinity: affected by the selector of the service is updated. // Also, if new service is added, equivalence cache will also become invalid since // existing pods may be "captured" by this service and change this predicate result. 
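AddAllEventHandlers now receives a single informers.SharedInformerFactory and derives each informer from it, as the node, PV, PVC and service hunks around here show. A minimal client-go sketch of that registration pattern; the handler callbacks are placeholders for the scheduler's cache and queue updates:

package sketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// registerNodeHandlers wires node event handlers through a shared informer
// factory instead of taking a dedicated NodeInformer argument.
func registerNodeHandlers(client kubernetes.Interface, stopCh <-chan struct{},
	onAdd func(obj interface{}), onUpdate func(oldObj, newObj interface{}), onDelete func(obj interface{})) {

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	factory.Core().V1().Nodes().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    onAdd,
		UpdateFunc: onUpdate,
		DeleteFunc: onDelete,
	})
	// Start all informers requested so far and wait for their caches to sync.
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}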
- serviceInformer.Informer().AddEventHandler( + informerFactory.Core().V1().Services().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: sched.onServiceAdd, UpdateFunc: sched.onServiceUpdate, @@ -487,31 +435,31 @@ func AddAllEventHandlers( }, ) - storageClassInformer.Informer().AddEventHandler( + informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: sched.onStorageClassAdd, }, ) } -func nodeSchedulingPropertiesChanged(newNode *v1.Node, oldNode *v1.Node) bool { +func nodeSchedulingPropertiesChange(newNode *v1.Node, oldNode *v1.Node) string { if nodeSpecUnschedulableChanged(newNode, oldNode) { - return true + return queue.NodeSpecUnschedulableChange } if nodeAllocatableChanged(newNode, oldNode) { - return true + return queue.NodeAllocatableChange } if nodeLabelsChanged(newNode, oldNode) { - return true + return queue.NodeLabelChange } if nodeTaintsChanged(newNode, oldNode) { - return true + return queue.NodeTaintChange } if nodeConditionsChanged(newNode, oldNode) { - return true + return queue.NodeConditionChange } - return false + return "" } func nodeAllocatableChanged(newNode *v1.Node, oldNode *v1.Node) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory.go new file mode 100644 index 00000000000..4c977d466a7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory.go @@ -0,0 +1,537 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduler + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + policylisters "k8s.io/client-go/listers/policy/v1beta1" + storagelisters "k8s.io/client-go/listers/storage/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + kubefeatures "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger" + internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" +) + +const ( + initialGetBackoff = 100 * time.Millisecond + maximalGetBackoff = time.Minute +) + +// Binder knows how to write a binding. +type Binder interface { + Bind(binding *v1.Binding) error +} + +// Configurator defines I/O, caching, and other functionality needed to +// construct a new scheduler. +type Configurator struct { + client clientset.Interface + + informerFactory informers.SharedInformerFactory + + podInformer coreinformers.PodInformer + + // Close this to stop all reflectors + StopEverything <-chan struct{} + + schedulerCache internalcache.Cache + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range [0-100]. + hardPodAffinitySymmetricWeight int32 + + // Handles volume binding decisions + volumeBinder *volumebinder.VolumeBinder + + // Always check all predicates even if the middle of one predicate fails. + alwaysCheckAllPredicates bool + + // Disable pod preemption or not. + disablePreemption bool + + // percentageOfNodesToScore specifies percentage of all nodes to score in each scheduling cycle. + percentageOfNodesToScore int32 + + bindTimeoutSeconds int64 + + podInitialBackoffSeconds int64 + + podMaxBackoffSeconds int64 + + enableNonPreempting bool + + // framework configuration arguments. + registry framework.Registry + plugins *schedulerapi.Plugins + pluginConfig []schedulerapi.PluginConfig + pluginConfigProducerRegistry *plugins.ConfigProducerRegistry + nodeInfoSnapshot *nodeinfosnapshot.Snapshot + + algorithmFactoryArgs AlgorithmFactoryArgs + configProducerArgs *plugins.ConfigProducerArgs +} + +// GetHardPodAffinitySymmetricWeight is exposed for testing. 
+func (c *Configurator) GetHardPodAffinitySymmetricWeight() int32 { + return c.hardPodAffinitySymmetricWeight +} + +// Create creates a scheduler with the default algorithm provider. +func (c *Configurator) Create() (*Scheduler, error) { + return c.CreateFromProvider(DefaultProvider) +} + +// CreateFromProvider creates a scheduler from the name of a registered algorithm provider. +func (c *Configurator) CreateFromProvider(providerName string) (*Scheduler, error) { + klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) + provider, err := GetAlgorithmProvider(providerName) + if err != nil { + return nil, err + } + return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{}) +} + +// CreateFromConfig creates a scheduler from the configuration file +func (c *Configurator) CreateFromConfig(policy schedulerapi.Policy) (*Scheduler, error) { + klog.V(2).Infof("Creating scheduler from configuration: %v", policy) + + // validate the policy configuration + if err := validation.ValidatePolicy(policy); err != nil { + return nil, err + } + + predicateKeys := sets.NewString() + if policy.Predicates == nil { + klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) + provider, err := GetAlgorithmProvider(DefaultProvider) + if err != nil { + return nil, err + } + predicateKeys = provider.FitPredicateKeys + } else { + for _, predicate := range policy.Predicates { + klog.V(2).Infof("Registering predicate: %s", predicate.Name) + predicateKeys.Insert(RegisterCustomFitPredicate(predicate, c.configProducerArgs)) + } + } + + priorityKeys := sets.NewString() + if policy.Priorities == nil { + klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) + provider, err := GetAlgorithmProvider(DefaultProvider) + if err != nil { + return nil, err + } + priorityKeys = provider.PriorityFunctionKeys + } else { + for _, priority := range policy.Priorities { + if priority.Name == priorities.EqualPriority { + klog.V(2).Infof("Skip registering priority: %s", priority.Name) + continue + } + klog.V(2).Infof("Registering priority: %s", priority.Name) + priorityKeys.Insert(RegisterCustomPriorityFunction(priority, c.configProducerArgs)) + } + } + + var extenders []algorithm.SchedulerExtender + if len(policy.Extenders) != 0 { + ignoredExtendedResources := sets.NewString() + var ignorableExtenders []algorithm.SchedulerExtender + for ii := range policy.Extenders { + klog.V(2).Infof("Creating extender with config %+v", policy.Extenders[ii]) + extender, err := core.NewHTTPExtender(&policy.Extenders[ii]) + if err != nil { + return nil, err + } + if !extender.IsIgnorable() { + extenders = append(extenders, extender) + } else { + ignorableExtenders = append(ignorableExtenders, extender) + } + for _, r := range policy.Extenders[ii].ManagedResources { + if r.IgnoredByScheduler { + ignoredExtendedResources.Insert(string(r.Name)) + } + } + } + // place ignorable extenders to the tail of extenders + extenders = append(extenders, ignorableExtenders...) + predicates.RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtendedResources) + } + // Providing HardPodAffinitySymmetricWeight in the policy config is the new and preferred way of providing the value. + // Give it higher precedence than scheduler CLI configuration when it is provided. 
+ if policy.HardPodAffinitySymmetricWeight != 0 { + c.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight + } + // When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured + // predicates even after one or more of them fails. + if policy.AlwaysCheckAllPredicates { + c.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates + } + + return c.CreateFromKeys(predicateKeys, priorityKeys, extenders) +} + +// CreateFromKeys creates a scheduler from a set of registered fit predicate keys and priority keys. +func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Scheduler, error) { + klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) + + if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 { + return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight()) + } + + predicateFuncs, pluginsForPredicates, pluginConfigForPredicates, err := c.getPredicateConfigs(predicateKeys) + if err != nil { + return nil, err + } + + priorityConfigs, pluginsForPriorities, pluginConfigForPriorities, err := c.getPriorityConfigs(priorityKeys) + if err != nil { + return nil, err + } + + priorityMetaProducer, err := getPriorityMetadataProducer(c.algorithmFactoryArgs) + if err != nil { + return nil, err + } + + predicateMetaProducer, err := getPredicateMetadataProducer(c.algorithmFactoryArgs) + if err != nil { + return nil, err + } + + // Combine all framework configurations. If this results in any duplication, framework + // instantiation should fail. + var plugins schedulerapi.Plugins + plugins.Append(pluginsForPredicates) + plugins.Append(pluginsForPriorities) + plugins.Append(c.plugins) + var pluginConfig []schedulerapi.PluginConfig + pluginConfig = append(pluginConfig, pluginConfigForPredicates...) + pluginConfig = append(pluginConfig, pluginConfigForPriorities...) + pluginConfig = append(pluginConfig, c.pluginConfig...) + + framework, err := framework.NewFramework( + c.registry, + &plugins, + pluginConfig, + framework.WithClientSet(c.client), + framework.WithInformerFactory(c.informerFactory), + framework.WithSnapshotSharedLister(c.nodeInfoSnapshot), + ) + if err != nil { + klog.Fatalf("error initializing the scheduling framework: %v", err) + } + + podQueue := internalqueue.NewSchedulingQueue( + c.StopEverything, + framework, + internalqueue.WithPodInitialBackoffDuration(time.Duration(c.podInitialBackoffSeconds)*time.Second), + internalqueue.WithPodMaxBackoffDuration(time.Duration(c.podMaxBackoffSeconds)*time.Second), + ) + + // Setup cache debugger. 
+ debugger := cachedebugger.New( + c.informerFactory.Core().V1().Nodes().Lister(), + c.podInformer.Lister(), + c.schedulerCache, + podQueue, + ) + debugger.ListenForSignal(c.StopEverything) + + go func() { + <-c.StopEverything + podQueue.Close() + }() + + algo := core.NewGenericScheduler( + c.schedulerCache, + podQueue, + predicateFuncs, + predicateMetaProducer, + priorityConfigs, + priorityMetaProducer, + c.nodeInfoSnapshot, + framework, + extenders, + c.volumeBinder, + c.informerFactory.Core().V1().PersistentVolumeClaims().Lister(), + GetPodDisruptionBudgetLister(c.informerFactory), + c.alwaysCheckAllPredicates, + c.disablePreemption, + c.percentageOfNodesToScore, + c.enableNonPreempting, + ) + + return &Scheduler{ + SchedulerCache: c.schedulerCache, + Algorithm: algo, + GetBinder: getBinderFunc(c.client, extenders), + Framework: framework, + NextPod: internalqueue.MakeNextPodFunc(podQueue), + Error: MakeDefaultErrorFunc(c.client, podQueue, c.schedulerCache), + StopEverything: c.StopEverything, + VolumeBinder: c.volumeBinder, + SchedulingQueue: podQueue, + Plugins: plugins, + PluginConfig: pluginConfig, + }, nil +} + +// getBinderFunc returns a func which returns an extender that supports bind or a default binder based on the given pod. +func getBinderFunc(client clientset.Interface, extenders []algorithm.SchedulerExtender) func(pod *v1.Pod) Binder { + defaultBinder := &binder{client} + return func(pod *v1.Pod) Binder { + for _, extender := range extenders { + if extender.IsBinder() && extender.IsInterested(pod) { + return extender + } + } + return defaultBinder + } +} + +// getPriorityConfigs returns priorities configuration: ones that will run as priorities and ones that will run +// as framework plugins. Specifically, a priority will run as a framework plugin if a plugin config producer was +// registered for that priority. +func (c *Configurator) getPriorityConfigs(priorityKeys sets.String) ([]priorities.PriorityConfig, *schedulerapi.Plugins, []schedulerapi.PluginConfig, error) { + allPriorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, c.algorithmFactoryArgs) + if err != nil { + return nil, nil, nil, err + } + + if c.pluginConfigProducerRegistry == nil { + return allPriorityConfigs, nil, nil, nil + } + + var priorityConfigs []priorities.PriorityConfig + var plugins schedulerapi.Plugins + var pluginConfig []schedulerapi.PluginConfig + frameworkConfigProducers := c.pluginConfigProducerRegistry.PriorityToConfigProducer + for _, p := range allPriorityConfigs { + if producer, exist := frameworkConfigProducers[p.Name]; exist { + args := *c.configProducerArgs + args.Weight = int32(p.Weight) + pl, pc := producer(args) + plugins.Append(&pl) + pluginConfig = append(pluginConfig, pc...) + } else { + priorityConfigs = append(priorityConfigs, p) + } + } + return priorityConfigs, &plugins, pluginConfig, nil +} + +// getPredicateConfigs returns predicates configuration: ones that will run as fitPredicates and ones that will run +// as framework plugins. Specifically, a predicate will run as a framework plugin if a plugin config producer was +// registered for that predicate. +// Note that the framework executes plugins according to their order in the Plugins list, and so predicates run as plugins +// are added to the Plugins list according to the order specified in predicates.Ordering(). 
+func (c *Configurator) getPredicateConfigs(predicateKeys sets.String) (map[string]predicates.FitPredicate, *schedulerapi.Plugins, []schedulerapi.PluginConfig, error) { + allFitPredicates, err := getFitPredicateFunctions(predicateKeys, c.algorithmFactoryArgs) + if err != nil { + return nil, nil, nil, err + } + + if c.pluginConfigProducerRegistry == nil { + return allFitPredicates, nil, nil, nil + } + + asPlugins := sets.NewString() + asFitPredicates := make(map[string]predicates.FitPredicate) + frameworkConfigProducers := c.pluginConfigProducerRegistry.PredicateToConfigProducer + + // First, identify the predicates that will run as actual fit predicates, and ones + // that will run as framework plugins. + for predicateKey := range allFitPredicates { + if _, exist := frameworkConfigProducers[predicateKey]; exist { + asPlugins.Insert(predicateKey) + } else { + asFitPredicates[predicateKey] = allFitPredicates[predicateKey] + } + } + + // Second, create the framework plugin configurations, and place them in the order + // that the corresponding predicates were supposed to run. + var plugins schedulerapi.Plugins + var pluginConfig []schedulerapi.PluginConfig + + for _, predicateKey := range predicates.Ordering() { + if asPlugins.Has(predicateKey) { + producer := frameworkConfigProducers[predicateKey] + p, pc := producer(*c.configProducerArgs) + plugins.Append(&p) + pluginConfig = append(pluginConfig, pc...) + asPlugins.Delete(predicateKey) + } + } + + // Third, add the rest in no specific order. + for predicateKey := range asPlugins { + producer := frameworkConfigProducers[predicateKey] + p, pc := producer(*c.configProducerArgs) + plugins.Append(&p) + pluginConfig = append(pluginConfig, pc...) + } + + return asFitPredicates, &plugins, pluginConfig, nil +} + +type podInformer struct { + informer cache.SharedIndexInformer +} + +func (i *podInformer) Informer() cache.SharedIndexInformer { + return i.informer +} + +func (i *podInformer) Lister() corelisters.PodLister { + return corelisters.NewPodLister(i.informer.GetIndexer()) +} + +// NewPodInformer creates a shared index informer that returns only non-terminal pods. 
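As the doc comment above notes, predicates that migrate to framework plugins must be appended in the order given by predicates.Ordering(), because the framework runs Filter plugins in list order; any remaining plugin-backed predicates are appended afterwards in no particular order. A sketch of just that ordering step, with plain string keys standing in for the config producers:

package sketch

// orderPluginKeys returns the predicate keys that will run as plugins:
// canonical ones first, in their prescribed order, followed by the rest.
// This mirrors the second and third steps of getPredicateConfigs above.
func orderPluginKeys(asPlugins map[string]bool, canonicalOrder []string) []string {
	remaining := make(map[string]bool, len(asPlugins))
	for k := range asPlugins {
		remaining[k] = true
	}
	var ordered []string
	for _, k := range canonicalOrder {
		if remaining[k] {
			ordered = append(ordered, k)
			delete(remaining, k)
		}
	}
	for k := range remaining { // leftovers, in no specific order
		ordered = append(ordered, k)
	}
	return ordered
}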
+func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) coreinformers.PodInformer { + selector := fields.ParseSelectorOrDie( + "status.phase!=" + string(v1.PodSucceeded) + + ",status.phase!=" + string(v1.PodFailed)) + lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector) + return &podInformer{ + informer: cache.NewSharedIndexInformer(lw, &v1.Pod{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), + } +} + +// MakeDefaultErrorFunc construct a function to handle pod scheduler error +func MakeDefaultErrorFunc(client clientset.Interface, podQueue internalqueue.SchedulingQueue, schedulerCache internalcache.Cache) func(*framework.PodInfo, error) { + return func(podInfo *framework.PodInfo, err error) { + pod := podInfo.Pod + if err == core.ErrNoNodesAvailable { + klog.V(2).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) + } else { + if _, ok := err.(*core.FitError); ok { + klog.V(2).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) + } else if errors.IsNotFound(err) { + klog.V(2).Infof("Unable to schedule %v/%v: possibly due to node not found: %v; waiting", pod.Namespace, pod.Name, err) + if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { + nodeName := errStatus.Status().Details.Name + // when node is not found, We do not remove the node right away. Trying again to get + // the node and if the node is still not found, then remove it from the scheduler cache. + _, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + if err := schedulerCache.RemoveNode(&node); err != nil { + klog.V(4).Infof("Node %q is not found; failed to remove it from the cache.", node.Name) + } + } + } + } else { + klog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) + } + } + + podSchedulingCycle := podQueue.SchedulingCycle() + // Retry asynchronously. + // Note that this is extremely rudimentary and we need a more real error handling path. + go func() { + defer runtime.HandleCrash() + podID := types.NamespacedName{ + Namespace: pod.Namespace, + Name: pod.Name, + } + + // An unschedulable pod will be placed in the unschedulable queue. + // This ensures that if the pod is nominated to run on a node, + // scheduler takes the pod into account when running predicates for the node. + // Get the pod again; it may have changed/been scheduled already. + getBackoff := initialGetBackoff + for { + pod, err := client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) + if err == nil { + if len(pod.Spec.NodeName) == 0 { + podInfo.Pod = pod + if err := podQueue.AddUnschedulableIfNotPresent(podInfo, podSchedulingCycle); err != nil { + klog.Error(err) + } + } + break + } + if errors.IsNotFound(err) { + klog.Warningf("A pod %v no longer exists", podID) + return + } + klog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) + if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff { + getBackoff = maximalGetBackoff + } + time.Sleep(getBackoff) + } + }() + } +} + +type binder struct { + Client clientset.Interface +} + +// Bind just does a POST binding RPC. 
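MakeDefaultErrorFunc above re-fetches the unscheduled pod with a get backoff that doubles from 100ms up to a one-minute cap before handing it back to the queue. The retry shape in isolation; op is an illustrative stand-in for the pod re-fetch and re-queue attempt:

package sketch

import "time"

const (
	initialGetBackoff = 100 * time.Millisecond
	maximalGetBackoff = time.Minute
)

// retryWithCappedBackoff keeps calling op until it reports done, doubling the
// sleep between attempts and capping it, as the error handler above does when
// it re-fetches the pod.
func retryWithCappedBackoff(op func() (done bool)) {
	backoff := initialGetBackoff
	for {
		if op() {
			return
		}
		if backoff = backoff * 2; backoff > maximalGetBackoff {
			backoff = maximalGetBackoff
		}
		time.Sleep(backoff)
	}
}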
+func (b *binder) Bind(binding *v1.Binding) error { + klog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) + return b.Client.CoreV1().Pods(binding.Namespace).Bind(binding) +} + +// GetPodDisruptionBudgetLister returns pdb lister from the given informer factory. Returns nil if PodDisruptionBudget feature is disabled. +func GetPodDisruptionBudgetLister(informerFactory informers.SharedInformerFactory) policylisters.PodDisruptionBudgetLister { + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodDisruptionBudget) { + return informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister() + } + return nil +} + +// GetCSINodeLister returns CSINode lister from the given informer factory. Returns nil if CSINodeInfo feature is disabled. +func GetCSINodeLister(informerFactory informers.SharedInformerFactory) storagelisters.CSINodeLister { + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) { + return informerFactory.Storage().V1().CSINodes().Lister() + } + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD deleted file mode 100644 index 7435994be91..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD +++ /dev/null @@ -1,98 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "plugins.go", - ], - importpath = "k8s.io/kubernetes/pkg/scheduler/factory", - visibility = ["//visibility:public"], - deps = [ - "//pkg/api/v1/pod:go_default_library", - "//pkg/features:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", - "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/api/validation:go_default_library", - "//pkg/scheduler/apis/config:go_default_library", - "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/internal/cache/debugger:go_default_library", - "//pkg/scheduler/internal/queue:go_default_library", - "//pkg/scheduler/volumebinder:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", - 
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", - "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/client-go/tools/events:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "factory_test.go", - "plugins_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/api/testing:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", - "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/api/latest:go_default_library", - "//pkg/scheduler/apis/config:go_default_library", - "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/internal/queue:go_default_library", - "//pkg/scheduler/nodeinfo:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", - "//staging/src/k8s.io/client-go/testing:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go deleted file mode 100644 index 399a6bdf66c..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go +++ /dev/null @@ -1,651 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package factory can set up a scheduler. This code is here instead of -// cmd/scheduler for both testability and reuse. 
-package factory - -import ( - "fmt" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - appsinformers "k8s.io/client-go/informers/apps/v1" - coreinformers "k8s.io/client-go/informers/core/v1" - policyinformers "k8s.io/client-go/informers/policy/v1beta1" - storageinformersv1 "k8s.io/client-go/informers/storage/v1" - storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1" - clientset "k8s.io/client-go/kubernetes" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" - policylisters "k8s.io/client-go/listers/policy/v1beta1" - storagelistersv1 "k8s.io/client-go/listers/storage/v1" - storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/events" - "k8s.io/klog" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "k8s.io/kubernetes/pkg/scheduler/api/validation" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/core" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" - cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger" - internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - "k8s.io/kubernetes/pkg/scheduler/volumebinder" -) - -const ( - initialGetBackoff = 100 * time.Millisecond - maximalGetBackoff = time.Minute -) - -// Binder knows how to write a binding. -type Binder interface { - Bind(binding *v1.Binding) error -} - -// PodConditionUpdater updates the condition of a pod based on the passed -// PodCondition -type PodConditionUpdater interface { - Update(pod *v1.Pod, podCondition *v1.PodCondition) error -} - -// Config is an implementation of the Scheduler's configured input data. -// TODO over time we should make this struct a hidden implementation detail of the scheduler. -type Config struct { - SchedulerCache internalcache.Cache - - Algorithm core.ScheduleAlgorithm - GetBinder func(pod *v1.Pod) Binder - // PodConditionUpdater is used only in case of scheduling errors. If we succeed - // with scheduling, PodScheduled condition will be updated in apiserver in /bind - // handler so that binding and setting PodCondition it is atomic. - PodConditionUpdater PodConditionUpdater - // PodPreemptor is used to evict pods and update 'NominatedNode' field of - // the preemptor pod. - PodPreemptor PodPreemptor - // Framework runs scheduler plugins at configured extension points. - Framework framework.Framework - - // NextPod should be a function that blocks until the next pod - // is available. We don't use a channel for this, because scheduling - // a pod may take some amount of time and we don't want pods to get - // stale while they sit in a channel. - NextPod func() *v1.Pod - - // WaitForCacheSync waits for scheduler cache to populate. - // It returns true if it was successful, false if the controller should shutdown. - WaitForCacheSync func() bool - - // Error is called if there is an error. 
It is passed the pod in - // question, and the error - Error func(*v1.Pod, error) - - // Recorder is the EventRecorder to use - Recorder events.EventRecorder - - // Close this to shut down the scheduler. - StopEverything <-chan struct{} - - // VolumeBinder handles PVC/PV binding for the pod. - VolumeBinder *volumebinder.VolumeBinder - - // Disable pod preemption or not. - DisablePreemption bool - - // SchedulingQueue holds pods to be scheduled - SchedulingQueue internalqueue.SchedulingQueue -} - -// PodPreemptor has methods needed to delete a pod and to update 'NominatedPod' -// field of the preemptor pod. -type PodPreemptor interface { - GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) - DeletePod(pod *v1.Pod) error - SetNominatedNodeName(pod *v1.Pod, nominatedNode string) error - RemoveNominatedNodeName(pod *v1.Pod) error -} - -// Configurator defines I/O, caching, and other functionality needed to -// construct a new scheduler. -type Configurator struct { - client clientset.Interface - // a means to list all PersistentVolumes - pVLister corelisters.PersistentVolumeLister - // a means to list all PersistentVolumeClaims - pVCLister corelisters.PersistentVolumeClaimLister - // a means to list all services - serviceLister corelisters.ServiceLister - // a means to list all controllers - controllerLister corelisters.ReplicationControllerLister - // a means to list all replicasets - replicaSetLister appslisters.ReplicaSetLister - // a means to list all statefulsets - statefulSetLister appslisters.StatefulSetLister - // a means to list all PodDisruptionBudgets - pdbLister policylisters.PodDisruptionBudgetLister - // a means to list all StorageClasses - storageClassLister storagelistersv1.StorageClassLister - // a means to list all CSINodes - csiNodeLister storagelistersv1beta1.CSINodeLister - // framework has a set of plugins and the context used for running them. - framework framework.Framework - - // Close this to stop all reflectors - StopEverything <-chan struct{} - - scheduledPodsHasSynced cache.InformerSynced - - schedulerCache internalcache.Cache - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - hardPodAffinitySymmetricWeight int32 - - // Handles volume binding decisions - volumeBinder *volumebinder.VolumeBinder - - // Always check all predicates even if the middle of one predicate fails. - alwaysCheckAllPredicates bool - - // Disable pod preemption or not. - disablePreemption bool - - // percentageOfNodesToScore specifies percentage of all nodes to score in each scheduling cycle. - percentageOfNodesToScore int32 - - bindTimeoutSeconds int64 - // queue for pods that need scheduling - podQueue internalqueue.SchedulingQueue - - enableNonPreempting bool -} - -// ConfigFactoryArgs is a set arguments passed to NewConfigFactory. 
-type ConfigFactoryArgs struct { - Client clientset.Interface - NodeInformer coreinformers.NodeInformer - PodInformer coreinformers.PodInformer - PvInformer coreinformers.PersistentVolumeInformer - PvcInformer coreinformers.PersistentVolumeClaimInformer - ReplicationControllerInformer coreinformers.ReplicationControllerInformer - ReplicaSetInformer appsinformers.ReplicaSetInformer - StatefulSetInformer appsinformers.StatefulSetInformer - ServiceInformer coreinformers.ServiceInformer - PdbInformer policyinformers.PodDisruptionBudgetInformer - StorageClassInformer storageinformersv1.StorageClassInformer - CSINodeInformer storageinformersv1beta1.CSINodeInformer - HardPodAffinitySymmetricWeight int32 - DisablePreemption bool - PercentageOfNodesToScore int32 - BindTimeoutSeconds int64 - StopCh <-chan struct{} - Registry framework.Registry - Plugins *config.Plugins - PluginConfig []config.PluginConfig -} - -// NewConfigFactory initializes the default implementation of a Configurator. To encourage eventual privatization of the struct type, we only -// return the interface. -func NewConfigFactory(args *ConfigFactoryArgs) *Configurator { - stopEverything := args.StopCh - if stopEverything == nil { - stopEverything = wait.NeverStop - } - schedulerCache := internalcache.New(30*time.Second, stopEverything) - - framework, err := framework.NewFramework(args.Registry, args.Plugins, args.PluginConfig) - if err != nil { - klog.Fatalf("error initializing the scheduling framework: %v", err) - } - - // storageClassInformer is only enabled through VolumeScheduling feature gate - var storageClassLister storagelistersv1.StorageClassLister - if args.StorageClassInformer != nil { - storageClassLister = args.StorageClassInformer.Lister() - } - - var csiNodeLister storagelistersv1beta1.CSINodeLister - if args.CSINodeInformer != nil { - csiNodeLister = args.CSINodeInformer.Lister() - } - - c := &Configurator{ - client: args.Client, - podQueue: internalqueue.NewSchedulingQueue(stopEverything, framework), - pVLister: args.PvInformer.Lister(), - pVCLister: args.PvcInformer.Lister(), - serviceLister: args.ServiceInformer.Lister(), - controllerLister: args.ReplicationControllerInformer.Lister(), - replicaSetLister: args.ReplicaSetInformer.Lister(), - statefulSetLister: args.StatefulSetInformer.Lister(), - pdbLister: args.PdbInformer.Lister(), - storageClassLister: storageClassLister, - csiNodeLister: csiNodeLister, - framework: framework, - schedulerCache: schedulerCache, - StopEverything: stopEverything, - hardPodAffinitySymmetricWeight: args.HardPodAffinitySymmetricWeight, - disablePreemption: args.DisablePreemption, - percentageOfNodesToScore: args.PercentageOfNodesToScore, - bindTimeoutSeconds: args.BindTimeoutSeconds, - enableNonPreempting: utilfeature.DefaultFeatureGate.Enabled(features.NonPreemptingPriority), - } - // Setup volume binder - c.volumeBinder = volumebinder.NewVolumeBinder(args.Client, args.NodeInformer, args.PvcInformer, args.PvInformer, args.StorageClassInformer, time.Duration(args.BindTimeoutSeconds)*time.Second) - c.scheduledPodsHasSynced = args.PodInformer.Informer().HasSynced - - // Setup cache debugger - debugger := cachedebugger.New( - args.NodeInformer.Lister(), - args.PodInformer.Lister(), - c.schedulerCache, - c.podQueue, - ) - debugger.ListenForSignal(c.StopEverything) - - go func() { - <-c.StopEverything - c.podQueue.Close() - }() - return c -} - -// GetHardPodAffinitySymmetricWeight is exposed for testing. 
-func (c *Configurator) GetHardPodAffinitySymmetricWeight() int32 { - return c.hardPodAffinitySymmetricWeight -} - -// Create creates a scheduler with the default algorithm provider. -func (c *Configurator) Create() (*Config, error) { - return c.CreateFromProvider(DefaultProvider) -} - -// CreateFromProvider creates a scheduler from the name of a registered algorithm provider. -func (c *Configurator) CreateFromProvider(providerName string) (*Config, error) { - klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) - provider, err := GetAlgorithmProvider(providerName) - if err != nil { - return nil, err - } - return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{}) -} - -// CreateFromConfig creates a scheduler from the configuration file -func (c *Configurator) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) { - klog.V(2).Infof("Creating scheduler from configuration: %v", policy) - - // validate the policy configuration - if err := validation.ValidatePolicy(policy); err != nil { - return nil, err - } - - predicateKeys := sets.NewString() - if policy.Predicates == nil { - klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) - provider, err := GetAlgorithmProvider(DefaultProvider) - if err != nil { - return nil, err - } - predicateKeys = provider.FitPredicateKeys - } else { - for _, predicate := range policy.Predicates { - klog.V(2).Infof("Registering predicate: %s", predicate.Name) - predicateKeys.Insert(RegisterCustomFitPredicate(predicate)) - } - } - - priorityKeys := sets.NewString() - if policy.Priorities == nil { - klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) - provider, err := GetAlgorithmProvider(DefaultProvider) - if err != nil { - return nil, err - } - priorityKeys = provider.PriorityFunctionKeys - } else { - for _, priority := range policy.Priorities { - klog.V(2).Infof("Registering priority: %s", priority.Name) - priorityKeys.Insert(RegisterCustomPriorityFunction(priority)) - } - } - - var extenders []algorithm.SchedulerExtender - if len(policy.ExtenderConfigs) != 0 { - ignoredExtendedResources := sets.NewString() - var ignorableExtenders []algorithm.SchedulerExtender - for ii := range policy.ExtenderConfigs { - klog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii]) - extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii]) - if err != nil { - return nil, err - } - if !extender.IsIgnorable() { - extenders = append(extenders, extender) - } else { - ignorableExtenders = append(ignorableExtenders, extender) - } - for _, r := range policy.ExtenderConfigs[ii].ManagedResources { - if r.IgnoredByScheduler { - ignoredExtendedResources.Insert(string(r.Name)) - } - } - } - // place ignorable extenders to the tail of extenders - extenders = append(extenders, ignorableExtenders...) - predicates.RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtendedResources) - } - // Providing HardPodAffinitySymmetricWeight in the policy config is the new and preferred way of providing the value. - // Give it higher precedence than scheduler CLI configuration when it is provided. - if policy.HardPodAffinitySymmetricWeight != 0 { - c.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight - } - // When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured - // predicates even after one or more of them fails. 
- if policy.AlwaysCheckAllPredicates { - c.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates - } - - return c.CreateFromKeys(predicateKeys, priorityKeys, extenders) -} - -// CreateFromKeys creates a scheduler from a set of registered fit predicate keys and priority keys. -func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) { - klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) - - if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 { - return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight()) - } - - predicateFuncs, err := c.GetPredicates(predicateKeys) - if err != nil { - return nil, err - } - - priorityConfigs, err := c.getPriorityFunctionConfigs(priorityKeys) - if err != nil { - return nil, err - } - - priorityMetaProducer, err := c.getPriorityMetadataProducer() - if err != nil { - return nil, err - } - - predicateMetaProducer, err := c.GetPredicateMetadataProducer() - if err != nil { - return nil, err - } - - algo := core.NewGenericScheduler( - c.schedulerCache, - c.podQueue, - predicateFuncs, - predicateMetaProducer, - priorityConfigs, - priorityMetaProducer, - c.framework, - extenders, - c.volumeBinder, - c.pVCLister, - c.pdbLister, - c.alwaysCheckAllPredicates, - c.disablePreemption, - c.percentageOfNodesToScore, - c.enableNonPreempting, - ) - - return &Config{ - SchedulerCache: c.schedulerCache, - Algorithm: algo, - GetBinder: getBinderFunc(c.client, extenders), - PodConditionUpdater: &podConditionUpdater{c.client}, - PodPreemptor: &podPreemptor{c.client}, - Framework: c.framework, - WaitForCacheSync: func() bool { - return cache.WaitForCacheSync(c.StopEverything, c.scheduledPodsHasSynced) - }, - NextPod: internalqueue.MakeNextPodFunc(c.podQueue), - Error: MakeDefaultErrorFunc(c.client, c.podQueue, c.schedulerCache, c.StopEverything), - StopEverything: c.StopEverything, - VolumeBinder: c.volumeBinder, - SchedulingQueue: c.podQueue, - }, nil -} - -// getBinderFunc returns a func which returns an extender that supports bind or a default binder based on the given pod. -func getBinderFunc(client clientset.Interface, extenders []algorithm.SchedulerExtender) func(pod *v1.Pod) Binder { - defaultBinder := &binder{client} - return func(pod *v1.Pod) Binder { - for _, extender := range extenders { - if extender.IsBinder() && extender.IsInterested(pod) { - return extender - } - } - return defaultBinder - } -} - -func (c *Configurator) getPriorityFunctionConfigs(priorityKeys sets.String) ([]priorities.PriorityConfig, error) { - pluginArgs, err := c.getPluginArgs() - if err != nil { - return nil, err - } - - return getPriorityFunctionConfigs(priorityKeys, *pluginArgs) -} - -func (c *Configurator) getPriorityMetadataProducer() (priorities.PriorityMetadataProducer, error) { - pluginArgs, err := c.getPluginArgs() - if err != nil { - return nil, err - } - - return getPriorityMetadataProducer(*pluginArgs) -} - -// GetPredicateMetadataProducer returns a function to build Predicate Metadata. -// It is used by the scheduler and other components, such as k8s.io/autoscaler/cluster-autoscaler. 
-func (c *Configurator) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) { - pluginArgs, err := c.getPluginArgs() - if err != nil { - return nil, err - } - return getPredicateMetadataProducer(*pluginArgs) -} - -// GetPredicates returns the predicate functions. -// It is used by the scheduler and other components, such as k8s.io/autoscaler/cluster-autoscaler. -func (c *Configurator) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error) { - pluginArgs, err := c.getPluginArgs() - if err != nil { - return nil, err - } - - return getFitPredicateFunctions(predicateKeys, *pluginArgs) -} - -func (c *Configurator) getPluginArgs() (*PluginFactoryArgs, error) { - return &PluginFactoryArgs{ - PodLister: c.schedulerCache, - ServiceLister: c.serviceLister, - ControllerLister: c.controllerLister, - ReplicaSetLister: c.replicaSetLister, - StatefulSetLister: c.statefulSetLister, - NodeLister: c.schedulerCache, - PDBLister: c.pdbLister, - NodeInfo: c.schedulerCache, - CSINodeInfo: c.schedulerCache, - PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister}, - PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister}, - StorageClassInfo: &predicates.CachedStorageClassInfo{StorageClassLister: c.storageClassLister}, - VolumeBinder: c.volumeBinder, - HardPodAffinitySymmetricWeight: c.hardPodAffinitySymmetricWeight, - }, nil -} - -type podInformer struct { - informer cache.SharedIndexInformer -} - -func (i *podInformer) Informer() cache.SharedIndexInformer { - return i.informer -} - -func (i *podInformer) Lister() corelisters.PodLister { - return corelisters.NewPodLister(i.informer.GetIndexer()) -} - -// NewPodInformer creates a shared index informer that returns only non-terminal pods. -func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) coreinformers.PodInformer { - selector := fields.ParseSelectorOrDie( - "status.phase!=" + string(v1.PodSucceeded) + - ",status.phase!=" + string(v1.PodFailed)) - lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector) - return &podInformer{ - informer: cache.NewSharedIndexInformer(lw, &v1.Pod{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), - } -} - -// MakeDefaultErrorFunc construct a function to handle pod scheduler error -func MakeDefaultErrorFunc(client clientset.Interface, podQueue internalqueue.SchedulingQueue, schedulerCache internalcache.Cache, stopEverything <-chan struct{}) func(pod *v1.Pod, err error) { - return func(pod *v1.Pod, err error) { - if err == core.ErrNoNodesAvailable { - klog.V(2).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) - } else { - if _, ok := err.(*core.FitError); ok { - klog.V(2).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) - } else if errors.IsNotFound(err) { - klog.V(2).Infof("Unable to schedule %v/%v: possibly due to node not found: %v; waiting", pod.Namespace, pod.Name, err) - if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { - nodeName := errStatus.Status().Details.Name - // when node is not found, We do not remove the node right away. Trying again to get - // the node and if the node is still not found, then remove it from the scheduler cache. 
- _, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { - node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - if err := schedulerCache.RemoveNode(&node); err != nil { - klog.V(4).Infof("Node %q is not found; failed to remove it from the cache.", node.Name) - } - } - } - } else { - klog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) - } - } - - podSchedulingCycle := podQueue.SchedulingCycle() - // Retry asynchronously. - // Note that this is extremely rudimentary and we need a more real error handling path. - go func() { - defer runtime.HandleCrash() - podID := types.NamespacedName{ - Namespace: pod.Namespace, - Name: pod.Name, - } - - // An unschedulable pod will be placed in the unschedulable queue. - // This ensures that if the pod is nominated to run on a node, - // scheduler takes the pod into account when running predicates for the node. - // Get the pod again; it may have changed/been scheduled already. - getBackoff := initialGetBackoff - for { - pod, err := client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) - if err == nil { - if len(pod.Spec.NodeName) == 0 { - if err := podQueue.AddUnschedulableIfNotPresent(pod, podSchedulingCycle); err != nil { - klog.Error(err) - } - } - break - } - if errors.IsNotFound(err) { - klog.Warningf("A pod %v no longer exists", podID) - return - } - klog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) - if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff { - getBackoff = maximalGetBackoff - } - time.Sleep(getBackoff) - } - }() - } -} - -type binder struct { - Client clientset.Interface -} - -// Bind just does a POST binding RPC. -func (b *binder) Bind(binding *v1.Binding) error { - klog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) - return b.Client.CoreV1().Pods(binding.Namespace).Bind(binding) -} - -type podConditionUpdater struct { - Client clientset.Interface -} - -func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error { - klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, condition.Status, condition.Reason) - if podutil.UpdatePodCondition(&pod.Status, condition) { - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) - return err - } - return nil -} - -type podPreemptor struct { - Client clientset.Interface -} - -func (p *podPreemptor) GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) { - return p.Client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) -} - -func (p *podPreemptor) DeletePod(pod *v1.Pod) error { - return p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) -} - -func (p *podPreemptor) SetNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error { - podCopy := pod.DeepCopy() - podCopy.Status.NominatedNodeName = nominatedNodeName - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(podCopy) - return err -} - -func (p *podPreemptor) RemoveNominatedNodeName(pod *v1.Pod) error { - if len(pod.Status.NominatedNodeName) == 0 { - return nil - } - return p.SetNominatedNodeName(pod, "") -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/BUILD new file mode 100644 index 00000000000..d355afa308d --- /dev/null +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/BUILD @@ -0,0 +1,81 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["default_registry.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library", + "//pkg/scheduler/framework/plugins/imagelocality:go_default_library", + "//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library", + "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library", + "//pkg/scheduler/framework/plugins/nodelabel:go_default_library", + "//pkg/scheduler/framework/plugins/nodename:go_default_library", + "//pkg/scheduler/framework/plugins/nodeports:go_default_library", + "//pkg/scheduler/framework/plugins/nodepreferavoidpods:go_default_library", + "//pkg/scheduler/framework/plugins/noderesources:go_default_library", + "//pkg/scheduler/framework/plugins/nodeunschedulable:go_default_library", + "//pkg/scheduler/framework/plugins/nodevolumelimits:go_default_library", + "//pkg/scheduler/framework/plugins/podtopologyspread:go_default_library", + "//pkg/scheduler/framework/plugins/requestedtocapacityratio:go_default_library", + "//pkg/scheduler/framework/plugins/serviceaffinity:go_default_library", + "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library", + "//pkg/scheduler/framework/plugins/volumebinding:go_default_library", + "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", + "//pkg/scheduler/framework/plugins/volumezone:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:all-srcs", + "//pkg/scheduler/framework/plugins/examples:all-srcs", + "//pkg/scheduler/framework/plugins/imagelocality:all-srcs", + "//pkg/scheduler/framework/plugins/interpodaffinity:all-srcs", + "//pkg/scheduler/framework/plugins/migration:all-srcs", + "//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs", + "//pkg/scheduler/framework/plugins/nodelabel:all-srcs", + "//pkg/scheduler/framework/plugins/nodename:all-srcs", + "//pkg/scheduler/framework/plugins/nodeports:all-srcs", + "//pkg/scheduler/framework/plugins/nodepreferavoidpods:all-srcs", + "//pkg/scheduler/framework/plugins/noderesources:all-srcs", + "//pkg/scheduler/framework/plugins/nodeunschedulable:all-srcs", + "//pkg/scheduler/framework/plugins/nodevolumelimits:all-srcs", + "//pkg/scheduler/framework/plugins/podtopologyspread:all-srcs", + "//pkg/scheduler/framework/plugins/requestedtocapacityratio:all-srcs", + "//pkg/scheduler/framework/plugins/serviceaffinity:all-srcs", + "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs", + "//pkg/scheduler/framework/plugins/volumebinding:all-srcs", + "//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs", + 
"//pkg/scheduler/framework/plugins/volumezone:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["default_registry_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/apis/config:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/default_registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/default_registry.go new file mode 100644 index 00000000000..d5811fb634d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/default_registry.go @@ -0,0 +1,337 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugins + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" +) + +// RegistryArgs arguments needed to create default plugin factories. +type RegistryArgs struct { + VolumeBinder *volumebinder.VolumeBinder +} + +// NewDefaultRegistry builds the default registry with all the in-tree plugins. +// This is the registry that Kubernetes default scheduler uses. A scheduler that runs out of tree +// plugins can register additional plugins through the WithFrameworkOutOfTreeRegistry option. 
+func NewDefaultRegistry(args *RegistryArgs) framework.Registry { + return framework.Registry{ + defaultpodtopologyspread.Name: defaultpodtopologyspread.New, + imagelocality.Name: imagelocality.New, + tainttoleration.Name: tainttoleration.New, + nodename.Name: nodename.New, + nodeports.Name: nodeports.New, + nodepreferavoidpods.Name: nodepreferavoidpods.New, + nodeaffinity.Name: nodeaffinity.New, + podtopologyspread.Name: podtopologyspread.New, + nodeunschedulable.Name: nodeunschedulable.New, + noderesources.FitName: noderesources.NewFit, + noderesources.BalancedAllocationName: noderesources.NewBalancedAllocation, + noderesources.MostAllocatedName: noderesources.NewMostAllocated, + noderesources.LeastAllocatedName: noderesources.NewLeastAllocated, + volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil + }, + volumerestrictions.Name: volumerestrictions.New, + volumezone.Name: volumezone.New, + nodevolumelimits.CSIName: nodevolumelimits.NewCSI, + nodevolumelimits.EBSName: nodevolumelimits.NewEBS, + nodevolumelimits.GCEPDName: nodevolumelimits.NewGCEPD, + nodevolumelimits.AzureDiskName: nodevolumelimits.NewAzureDisk, + nodevolumelimits.CinderName: nodevolumelimits.NewCinder, + interpodaffinity.Name: interpodaffinity.New, + nodelabel.Name: nodelabel.New, + requestedtocapacityratio.Name: requestedtocapacityratio.New, + serviceaffinity.Name: serviceaffinity.New, + } +} + +// ConfigProducerArgs contains arguments that are passed to the producer. +// As we add more predicates/priorities to framework plugins mappings, more arguments +// may be added here. +type ConfigProducerArgs struct { + // Weight used for priority functions. + Weight int32 + // NodeLabelArgs is the args for the NodeLabel plugin. + NodeLabelArgs *nodelabel.Args + // RequestedToCapacityRatioArgs is the args for the RequestedToCapacityRatio plugin. + RequestedToCapacityRatioArgs *requestedtocapacityratio.Args + // ServiceAffinityArgs is the args for the ServiceAffinity plugin. + ServiceAffinityArgs *serviceaffinity.Args +} + +// ConfigProducer produces a framework's configuration. +type ConfigProducer func(args ConfigProducerArgs) (config.Plugins, []config.PluginConfig) + +// ConfigProducerRegistry tracks mappings from predicates/priorities to framework config producers. +type ConfigProducerRegistry struct { + // maps that associate predicates/priorities with framework plugin configurations. + PredicateToConfigProducer map[string]ConfigProducer + PriorityToConfigProducer map[string]ConfigProducer +} + +// NewDefaultConfigProducerRegistry creates a new producer registry. +func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry { + registry := &ConfigProducerRegistry{ + PredicateToConfigProducer: make(map[string]ConfigProducer), + PriorityToConfigProducer: make(map[string]ConfigProducer), + } + // Register Predicates. + registry.RegisterPredicate(predicates.GeneralPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + // GeneralPredicate is a combination of predicates. 
+ plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil) + plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil) + plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil) + plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil) + return + }) + registry.RegisterPredicate(predicates.PodToleratesNodeTaintsPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, tainttoleration.Name, nil) + return + }) + registry.RegisterPredicate(predicates.PodFitsResourcesPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil) + return + }) + registry.RegisterPredicate(predicates.HostNamePred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil) + return + }) + registry.RegisterPredicate(predicates.PodFitsHostPortsPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil) + return + }) + registry.RegisterPredicate(predicates.MatchNodeSelectorPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil) + return + }) + registry.RegisterPredicate(predicates.CheckNodeUnschedulablePred, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodeunschedulable.Name, nil) + return + }) + registry.RegisterPredicate(predicates.CheckVolumeBindingPred, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, volumebinding.Name, nil) + return + }) + registry.RegisterPredicate(predicates.NoDiskConflictPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, volumerestrictions.Name, nil) + return + }) + registry.RegisterPredicate(predicates.NoVolumeZoneConflictPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, volumezone.Name, nil) + return + }) + registry.RegisterPredicate(predicates.MaxCSIVolumeCountPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CSIName, nil) + return + }) + registry.RegisterPredicate(predicates.MaxEBSVolumeCountPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.EBSName, nil) + return + }) + registry.RegisterPredicate(predicates.MaxGCEPDVolumeCountPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.GCEPDName, nil) + return + }) + registry.RegisterPredicate(predicates.MaxAzureDiskVolumeCountPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, 
nodevolumelimits.AzureDiskName, nil) + return + }) + registry.RegisterPredicate(predicates.MaxCinderVolumeCountPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CinderName, nil) + return + }) + registry.RegisterPredicate(predicates.MatchInterPodAffinityPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, interpodaffinity.Name, nil) + return + }) + registry.RegisterPredicate(predicates.EvenPodsSpreadPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, podtopologyspread.Name, nil) + return + }) + registry.RegisterPredicate(predicates.CheckNodeLabelPresencePred, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, nodelabel.Name, nil) + pluginConfig = append(pluginConfig, makePluginConfig(nodelabel.Name, args.NodeLabelArgs)) + return + }) + registry.RegisterPredicate(predicates.CheckServiceAffinityPred, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, serviceaffinity.Name, nil) + pluginConfig = append(pluginConfig, makePluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs)) + return + }) + + // Register Priorities. + registry.RegisterPriority(priorities.SelectorSpreadPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, defaultpodtopologyspread.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.TaintTolerationPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, tainttoleration.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.NodeAffinityPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, nodeaffinity.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.ImageLocalityPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, imagelocality.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.InterPodAffinityPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, interpodaffinity.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.NodePreferAvoidPodsPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, nodepreferavoidpods.Name, &args.Weight) + return + }) + registry.RegisterPriority(priorities.MostRequestedPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, noderesources.MostAllocatedName, &args.Weight) + return + }) + registry.RegisterPriority(priorities.BalancedResourceAllocation, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + 
plugins.Score = appendToPluginSet(plugins.Score, noderesources.BalancedAllocationName, &args.Weight) + return + }) + registry.RegisterPriority(priorities.LeastRequestedPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, noderesources.LeastAllocatedName, &args.Weight) + return + }) + registry.RegisterPriority(priorities.EvenPodsSpreadPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, podtopologyspread.Name, &args.Weight) + return + }) + registry.RegisterPriority(requestedtocapacityratio.Name, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, requestedtocapacityratio.Name, &args.Weight) + pluginConfig = append(pluginConfig, makePluginConfig(requestedtocapacityratio.Name, args.RequestedToCapacityRatioArgs)) + return + }) + + registry.RegisterPriority(nodelabel.Name, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, nodelabel.Name, &args.Weight) + pluginConfig = append(pluginConfig, makePluginConfig(nodelabel.Name, args.NodeLabelArgs)) + return + }) + registry.RegisterPriority(serviceaffinity.Name, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, serviceaffinity.Name, &args.Weight) + pluginConfig = append(pluginConfig, makePluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs)) + return + }) + return registry +} + +func registerProducer(name string, producer ConfigProducer, producersMap map[string]ConfigProducer) error { + if _, exist := producersMap[name]; exist { + return fmt.Errorf("already registered %q", name) + } + producersMap[name] = producer + return nil +} + +// RegisterPredicate registers a config producer for a predicate. +func (f *ConfigProducerRegistry) RegisterPredicate(name string, producer ConfigProducer) error { + return registerProducer(name, producer, f.PredicateToConfigProducer) +} + +// RegisterPriority registers a framework config producer for a priority. 
+func (f *ConfigProducerRegistry) RegisterPriority(name string, producer ConfigProducer) error { + return registerProducer(name, producer, f.PriorityToConfigProducer) +} + +func appendToPluginSet(set *config.PluginSet, name string, weight *int32) *config.PluginSet { + if set == nil { + set = &config.PluginSet{} + } + cfg := config.Plugin{Name: name} + if weight != nil { + cfg.Weight = *weight + } + set.Enabled = append(set.Enabled, cfg) + return set +} + +func makePluginConfig(pluginName string, args interface{}) config.PluginConfig { + encoding, err := json.Marshal(args) + if err != nil { + klog.Fatal(fmt.Errorf("Failed to marshal %+v: %v", args, err)) + return config.PluginConfig{} + } + config := config.PluginConfig{ + Name: pluginName, + Args: runtime.Unknown{Raw: encoding}, + } + return config +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD new file mode 100644 index 00000000000..630262587a5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["default_pod_topology_spread.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["default_pod_topology_spread_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go new file mode 100644 index 00000000000..1b0945dbe55 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package defaultpodtopologyspread + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// DefaultPodTopologySpread is a plugin that calculates selector spread priority. +type DefaultPodTopologySpread struct { + handle framework.FrameworkHandle + calculateSpreadPriorityMap priorities.PriorityMapFunction + calculateSpreadPriorityReduce priorities.PriorityReduceFunction +} + +var _ framework.ScorePlugin = &DefaultPodTopologySpread{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "DefaultPodTopologySpread" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *DefaultPodTopologySpread) Name() string { + return Name +} + +// Score invoked at the Score extension point. +// The "score" returned in this function is the matching number of pods on the `nodeName`, +// it is normalized later. +func (pl *DefaultPodTopologySpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := pl.calculateSpreadPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *DefaultPodTopologySpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + meta := migration.PriorityMetadata(state) + err := pl.calculateSpreadPriorityReduce(pod, meta, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *DefaultPodTopologySpread) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + calculateSpreadPriorityMap, calculateSpreadPriorityReduce := priorities.NewSelectorSpreadPriority( + informerFactory.Core().V1().Services().Lister(), + informerFactory.Core().V1().ReplicationControllers().Lister(), + informerFactory.Apps().V1().ReplicaSets().Lister(), + informerFactory.Apps().V1().StatefulSets().Lister(), + ) + + return &DefaultPodTopologySpread{ + handle: handle, + calculateSpreadPriorityMap: calculateSpreadPriorityMap, + calculateSpreadPriorityReduce: calculateSpreadPriorityReduce, + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/BUILD new file mode 100644 index 00000000000..2347f104b9f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/BUILD @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["image_locality.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["image_locality_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/util/parsers:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go new file mode 100644 index 00000000000..f8555c528ad --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package imagelocality + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// ImageLocality is a score plugin that favors nodes that already have requested pod container's images. +type ImageLocality struct { + handle framework.FrameworkHandle +} + +var _ framework.ScorePlugin = &ImageLocality{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "ImageLocality" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *ImageLocality) Name() string { + return Name +} + +// Score invoked at the score extension point. +func (pl *ImageLocality) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := priorities.ImageLocalityPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *ImageLocality) ScoreExtensions() framework.ScoreExtensions { + return nil +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &ImageLocality{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/BUILD new file mode 100644 index 00000000000..d9861323b94 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["interpod_affinity.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["interpod_affinity_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity.go new file mode 100644 index 00000000000..3f562f55275 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpodaffinity + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// InterPodAffinity is a plugin that checks inter pod affinity +type InterPodAffinity struct { + handle framework.FrameworkHandle + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &InterPodAffinity{} +var _ framework.ScorePlugin = &InterPodAffinity{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "InterPodAffinity" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *InterPodAffinity) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata) + if !ok { + return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState)) + } + _, reasons, err := pl.predicate(pod, meta, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the Score extension point. +// The "score" returned in this function is the matching number of pods on the `nodeName`, +// it is normalized later. 
+func (pl *InterPodAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := priorities.CalculateInterPodAffinityPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + meta := migration.PriorityMetadata(state) + err := priorities.CalculateInterPodAffinityPriorityReduce(pod, meta, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *InterPodAffinity) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &InterPodAffinity{ + handle: h, + predicate: predicates.NewPodAffinityPredicate(h.SnapshotSharedLister().NodeInfos(), h.SnapshotSharedLister().Pods()), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/BUILD new file mode 100644 index 00000000000..1a291e45aac --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["utils.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["utils_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/utils.go new file mode 100644 index 00000000000..18c47a4ec51 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration/utils.go @@ -0,0 +1,121 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package migration + +import ( + "k8s.io/klog" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +const ( + // PredicatesStateKey is the key in CycleState to PredicateStateData + PredicatesStateKey = "predicates" + + // PrioritiesStateKey is the key in CycleState to PrioritiesStateData + PrioritiesStateKey = "priorities" +) + +// PredicateResultToFrameworkStatus converts a predicate result (PredicateFailureReason + error) +// to a framework status. +func PredicateResultToFrameworkStatus(reasons []predicates.PredicateFailureReason, err error) *framework.Status { + if s := ErrorToFrameworkStatus(err); s != nil { + return s + } + + if len(reasons) == 0 { + return nil + } + + if r := predicates.UnresolvablePredicateExists(reasons); r != nil { + return framework.NewStatus(framework.UnschedulableAndUnresolvable, r.GetReason()) + } + + // We will just use the first reason. + return framework.NewStatus(framework.Unschedulable, reasons[0].GetReason()) +} + +// ErrorToFrameworkStatus converts an error to a framework status. +func ErrorToFrameworkStatus(err error) *framework.Status { + if err != nil { + return framework.NewStatus(framework.Error, err.Error()) + } + return nil +} + +// PredicatesStateData is a pointer to Metadata. In the normal case, StateData is supposed to +// be generated and stored in CycleState by a framework plugin (like a PreFilter pre-computing data for +// its corresponding Filter). However, during migration, the scheduler will inject a pointer to +// Metadata into CycleState. This "hack" is necessary because during migration Filters that implement +// predicates functionality will be calling into the existing predicate functions, and need +// to pass Metadata. +type PredicatesStateData struct { + Reference interface{} +} + +// Clone is supposed to make a copy of the data, but since this is just a pointer, we are practically +// just copying the pointer. This is ok because the actual reference to the Metadata +// copy that is made by generic_scheduler during preemption cycle will be injected again outside +// the framework. +func (p *PredicatesStateData) Clone() framework.StateData { + return &PredicatesStateData{ + Reference: p.Reference, + } +} + +// PrioritiesStateData is a pointer to PrioritiesMetadata. +type PrioritiesStateData struct { + Reference interface{} +} + +// Clone is supposed to make a copy of the data, but since this is just a pointer, we are practically +// just copying the pointer. +func (p *PrioritiesStateData) Clone() framework.StateData { + return &PrioritiesStateData{ + Reference: p.Reference, + } +} + +// PriorityMetadata returns priority metadata stored in CycleState. 
+func PriorityMetadata(state *framework.CycleState) interface{} { + if state == nil { + return nil + } + + var meta interface{} + if s, err := state.Read(PrioritiesStateKey); err == nil { + meta = s.(*PrioritiesStateData).Reference + } else { + klog.Errorf("reading key %q from CycleState, continuing without metadata: %v", PrioritiesStateKey, err) + } + return meta +} + +// PredicateMetadata returns predicate metadata stored in CycleState. +func PredicateMetadata(state *framework.CycleState) interface{} { + if state == nil { + return nil + } + + var meta interface{} + if s, err := state.Read(PredicatesStateKey); err == nil { + meta = s.(*PredicatesStateData).Reference + } else { + klog.Errorf("reading key %q from CycleState, continuing without metadata: %v", PredicatesStateKey, err) + } + return meta +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/BUILD new file mode 100644 index 00000000000..8db0ba1efac --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/BUILD @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["node_affinity.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["node_affinity_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go new file mode 100644 index 00000000000..e086b0d8d1f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeaffinity + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// NodeAffinity is a plugin that checks if a pod node selector matches the node label. +type NodeAffinity struct { + handle framework.FrameworkHandle +} + +var _ framework.FilterPlugin = &NodeAffinity{} +var _ framework.ScorePlugin = &NodeAffinity{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "NodeAffinity" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *NodeAffinity) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + _, reasons, err := predicates.PodMatchNodeSelector(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the Score extension point. +func (pl *NodeAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := priorities.CalculateNodeAffinityPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + // Note that CalculateNodeAffinityPriorityReduce doesn't use priority metadata, hence passing nil here. + err := priorities.CalculateNodeAffinityPriorityReduce(pod, nil, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *NodeAffinity) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &NodeAffinity{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/BUILD new file mode 100644 index 00000000000..cc70641a259 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["node_label.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["node_label_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/node_label.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/node_label.go new file mode 100644 index 00000000000..1a91bd5e46d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel/node_label.go @@ -0,0 +1,118 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodelabel + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// Name of this plugin. +const Name = "NodeLabel" + +// Args holds the args that are used to configure the plugin. 
+type Args struct { + // PresentLabels should be present for the node to be considered a fit for hosting the pod + PresentLabels []string `json:"presentLabels,omitempty"` + // AbsentLabels should be absent for the node to be considered a fit for hosting the pod + AbsentLabels []string `json:"absentLabels,omitempty"` + // Nodes that have labels in the list will get a higher score. + PresentLabelsPreference []string `json:"presentLabelsPreference,omitempty"` + // Nodes that don't have labels in the list will get a higher score. + AbsentLabelsPreference []string `json:"absentLabelsPreference,omitempty"` +} + +// validateArgs validates that presentLabels and absentLabels do not conflict. +func validateNoConflict(presentLabels []string, absentLabels []string) error { + m := make(map[string]struct{}, len(presentLabels)) + for _, l := range presentLabels { + m[l] = struct{}{} + } + for _, l := range absentLabels { + if _, ok := m[l]; ok { + return fmt.Errorf("detecting at least one label (e.g., %q) that exist in both the present(%+v) and absent(%+v) label list", l, presentLabels, absentLabels) + } + } + return nil +} + +// New initializes a new plugin and returns it. +func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + args := &Args{} + if err := framework.DecodeInto(plArgs, args); err != nil { + return nil, err + } + if err := validateNoConflict(args.PresentLabels, args.AbsentLabels); err != nil { + return nil, err + } + if err := validateNoConflict(args.PresentLabelsPreference, args.AbsentLabelsPreference); err != nil { + return nil, err + } + // Note that the reduce function is always nil therefore it's ignored. + prioritize, _ := priorities.NewNodeLabelPriority(args.PresentLabelsPreference, args.AbsentLabelsPreference) + return &NodeLabel{ + handle: handle, + predicate: predicates.NewNodeLabelPredicate(args.PresentLabels, args.AbsentLabels), + prioritize: prioritize, + }, nil +} + +// NodeLabel checks whether a pod can fit based on the node labels which match a filter that it requests. +type NodeLabel struct { + handle framework.FrameworkHandle + predicate predicates.FitPredicate + prioritize priorities.PriorityMapFunction +} + +var _ framework.FilterPlugin = &NodeLabel{} +var _ framework.ScorePlugin = &NodeLabel{} + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *NodeLabel) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // Note that NodeLabelPredicate doesn't use predicate metadata, hence passing nil here. + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the score extension point. +func (pl *NodeLabel) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + // Note that node label priority function doesn't use metadata, hence passing nil here. + s, err := pl.prioritize(pod, nil, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. 
+func (pl *NodeLabel) ScoreExtensions() framework.ScoreExtensions { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/BUILD new file mode 100644 index 00000000000..71eca63f553 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["node_name.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["node_name_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go new file mode 100644 index 00000000000..90a3d84d5c6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodename + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// NodeName is a plugin that checks if a pod spec node name matches the current node. +type NodeName struct{} + +var _ framework.FilterPlugin = &NodeName{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "NodeName" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *NodeName) Name() string { + return Name +} + +// Filter invoked at the filter extension point. 
+func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + _, reasons, err := predicates.PodFitsHost(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return &NodeName{}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/BUILD new file mode 100644 index 00000000000..42ffe787762 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["node_ports.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["node_ports_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go new file mode 100644 index 00000000000..f97a2508c15 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeports + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// NodePorts is a plugin that checks if a node has free ports for the requested pod ports. 
+type NodePorts struct{} + +var _ framework.FilterPlugin = &NodePorts{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "NodePorts" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *NodePorts) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *NodePorts) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + _, reasons, err := predicates.PodFitsHostPorts(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return &NodePorts{}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD similarity index 55% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD index bcb1961dd77..ee60c6b6728 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD @@ -2,30 +2,27 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["mount_pod.go"], - importpath = "k8s.io/kubernetes/pkg/kubelet/mountpod", + srcs = ["node_prefer_avoid_pods.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods", visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/config:go_default_library", - "//pkg/kubelet/pod:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/utils/strings:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["mount_pod_test.go"], + srcs = ["node_prefer_avoid_pods_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/configmap:go_default_library", - "//pkg/kubelet/pod:go_default_library", - "//pkg/kubelet/pod/testing:go_default_library", - "//pkg/kubelet/secret:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods.go new file mode 100644 index 00000000000..ce62bb34b4e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodepreferavoidpods
+
+import (
+	"context"
+	"fmt"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+)
+
+// NodePreferAvoidPods is a plugin that prioritizes nodes according to the node annotation
+// "scheduler.alpha.kubernetes.io/preferAvoidPods".
+type NodePreferAvoidPods struct {
+	handle framework.FrameworkHandle
+}
+
+var _ framework.ScorePlugin = &NodePreferAvoidPods{}
+
+// Name is the name of the plugin used in the plugin registry and configurations.
+const Name = "NodePreferAvoidPods"
+
+// Name returns name of the plugin. It is used in logs, etc.
+func (pl *NodePreferAvoidPods) Name() string {
+	return Name
+}
+
+// Score invoked at the score extension point.
+func (pl *NodePreferAvoidPods) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
+	nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
+	if err != nil {
+		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
+	}
+
+	meta := migration.PriorityMetadata(state)
+	s, err := priorities.CalculateNodePreferAvoidPodsPriorityMap(pod, meta, nodeInfo)
+	return s.Score, migration.ErrorToFrameworkStatus(err)
+}
+
+// ScoreExtensions of the Score plugin.
+func (pl *NodePreferAvoidPods) ScoreExtensions() framework.ScoreExtensions {
+	return nil
+}
+
+// New initializes a new plugin and returns it.
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &NodePreferAvoidPods{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD new file mode 100644 index 00000000000..aa2751836fb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD @@ -0,0 +1,65 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "balanced_allocation.go", + "fit.go", + "least_allocated.go", + "most_allocated.go", + "test_util.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "balanced_allocation_test.go", + "fit_test.go", + "least_allocated_test.go", + "most_allocated_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go new file mode 100644 index 00000000000..e0984f2835c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package noderesources + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction +// of capacity, and prioritizes the host based on how close the two metrics are to each other. +type BalancedAllocation struct { + handle framework.FrameworkHandle +} + +var _ = framework.ScorePlugin(&BalancedAllocation{}) + +// BalancedAllocationName is the name of the plugin used in the plugin registry and configurations. +const BalancedAllocationName = "NodeResourcesBalancedAllocation" + +// Name returns name of the plugin. It is used in logs, etc. +func (ba *BalancedAllocation) Name() string { + return BalancedAllocationName +} + +// Score invoked at the score extension point. +func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := ba.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + // BalancedResourceAllocationMap does not use priority metadata, hence we pass nil here + s, err := priorities.BalancedResourceAllocationMap(pod, nil, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions { + return nil +} + +// NewBalancedAllocation initializes a new plugin and returns it. +func NewBalancedAllocation(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &BalancedAllocation{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go new file mode 100644 index 00000000000..67327c8c744 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package noderesources + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// Fit is a plugin that checks if a node has sufficient resources. +type Fit struct{} + +var _ framework.FilterPlugin = &Fit{} + +// FitName is the name of the plugin used in the plugin registry and configurations. +const FitName = "NodeResourcesFit" + +// Name returns name of the plugin. It is used in logs, etc. +func (f *Fit) Name() string { + return FitName +} + +// Filter invoked at the filter extension point. +func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata) + if !ok { + return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState)) + } + _, reasons, err := predicates.PodFitsResources(pod, meta, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewFit initializes a new plugin and returns it. +func NewFit(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return &Fit{}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go new file mode 100644 index 00000000000..6c8b7d7b195 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package noderesources + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// LeastAllocated is a score plugin that favors nodes with fewer allocation requested resources based on requested resources. +type LeastAllocated struct { + handle framework.FrameworkHandle +} + +var _ = framework.ScorePlugin(&LeastAllocated{}) + +// LeastAllocatedName is the name of the plugin used in the plugin registry and configurations. +const LeastAllocatedName = "NodeResourcesLeastAllocated" + +// Name returns name of the plugin. It is used in logs, etc. +func (la *LeastAllocated) Name() string { + return LeastAllocatedName +} + +// Score invoked at the score extension point. 
+func (la *LeastAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := la.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + // LeastRequestedPriorityMap does not use priority metadata, hence we pass nil here + s, err := priorities.LeastRequestedPriorityMap(pod, nil, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions { + return nil +} + +// NewLeastAllocated initializes a new plugin and returns it. +func NewLeastAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &LeastAllocated{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go new file mode 100644 index 00000000000..b4ef4d337c6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package noderesources + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// MostAllocated is a score plugin that favors nodes with high allocation based on requested resources. +type MostAllocated struct { + handle framework.FrameworkHandle +} + +var _ = framework.ScorePlugin(&MostAllocated{}) + +// MostAllocatedName is the name of the plugin used in the plugin registry and configurations. +const MostAllocatedName = "NodeResourcesMostAllocated" + +// Name returns name of the plugin. It is used in logs, etc. +func (ma *MostAllocated) Name() string { + return MostAllocatedName +} + +// Score invoked at the Score extension point. +func (ma *MostAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := ma.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + // MostRequestedPriorityMap does not use priority metadata, hence we pass nil here + s, err := priorities.MostRequestedPriorityMap(pod, nil, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. 
+func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions { + return nil +} + +// NewMostAllocated initializes a new plugin and returns it. +func NewMostAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &MostAllocated{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go new file mode 100644 index 00000000000..bd77f6a0bc6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go @@ -0,0 +1,39 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package noderesources + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func makeNode(node string, milliCPU, memory int64) *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: node}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + }, + }, + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD new file mode 100644 index 00000000000..76c1f649e14 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["node_unschedulable.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["node_unschedulable_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = 
"all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go new file mode 100644 index 00000000000..749a6b07b28 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeunschedulable + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// NodeUnschedulable is a plugin that priorities nodes according to the node annotation +// "scheduler.alpha.kubernetes.io/preferAvoidPods". +type NodeUnschedulable struct { +} + +var _ framework.FilterPlugin = &NodeUnschedulable{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "NodeUnschedulable" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *NodeUnschedulable) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + _, reasons, err := predicates.CheckNodeUnschedulablePredicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return &NodeUnschedulable{}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD new file mode 100644 index 00000000000..671040e97c2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD @@ -0,0 +1,70 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "azure.go", + "cinder.go", + "csi.go", + "csinode_helper.go", + "ebs.go", + "gce.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits", + visibility = ["//visibility:public"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "azure_test.go", + "cinder_test.go", + "csi_test.go", + "ebs_test.go", + "gce_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go new file mode 100644 index 00000000000..2417bc7e0b1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodevolumelimits + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// AzureDiskLimits is a plugin that checks node volume limits. +type AzureDiskLimits struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &AzureDiskLimits{} + +// AzureDiskName is the name of the plugin used in the plugin registry and configurations. +const AzureDiskName = "AzureDiskLimits" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *AzureDiskLimits) Name() string { + return AzureDiskName +} + +// Filter invoked at the filter extension point. +func (pl *AzureDiskLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewAzureDisk returns function that initializes a new plugin and returns it. +func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + + return &AzureDiskLimits{ + predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go new file mode 100644 index 00000000000..6b584ad8046 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nodevolumelimits + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// CinderLimits is a plugin that checks node volume limits. +type CinderLimits struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &CinderLimits{} + +// CinderName is the name of the plugin used in the plugin registry and configurations. +const CinderName = "CinderLimits" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *CinderLimits) Name() string { + return CinderName +} + +// Filter invoked at the filter extension point. +func (pl *CinderLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewCinder returns function that initializes a new plugin and returns it. +func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + return &CinderLimits{ + predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go new file mode 100644 index 00000000000..40062e5cb81 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodevolumelimits + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// CSILimits is a plugin that checks node volume limits. +type CSILimits struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &CSILimits{} + +// CSIName is the name of the plugin used in the plugin registry and configurations. +const CSIName = "NodeVolumeLimits" + +// Name returns name of the plugin. It is used in logs, etc. 
+func (pl *CSILimits) Name() string { + return CSIName +} + +// Filter invoked at the filter extension point. +func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewCSI initializes a new plugin and returns it. +func NewCSI(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + + return &CSILimits{ + predicate: predicates.NewCSIMaxVolumeLimitPredicate(getCSINodeListerIfEnabled(informerFactory), pvLister, pvcLister, scLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go new file mode 100644 index 00000000000..196aa3db34d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodevolumelimits + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + storagelisters "k8s.io/client-go/listers/storage/v1" + kubefeatures "k8s.io/kubernetes/pkg/features" +) + +// getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled +func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister { + if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) { + return nil + } + return factory.Storage().V1().CSINodes().Lister() +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go new file mode 100644 index 00000000000..b80f884abeb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nodevolumelimits + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// EBSLimits is a plugin that checks node volume limits. +type EBSLimits struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &EBSLimits{} + +// EBSName is the name of the plugin used in the plugin registry and configurations. +const EBSName = "EBSLimits" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *EBSLimits) Name() string { + return EBSName +} + +// Filter invoked at the filter extension point. +func (pl *EBSLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewEBS returns function that initializes a new plugin and returns it. +func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + + return &EBSLimits{ + predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go new file mode 100644 index 00000000000..a5730d97784 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodevolumelimits + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// GCEPDLimits is a plugin that checks node volume limits. +type GCEPDLimits struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &GCEPDLimits{} + +// GCEPDName is the name of the plugin used in the plugin registry and configurations. +const GCEPDName = "GCEPDLimits" + +// Name returns name of the plugin. It is used in logs, etc. 
+func (pl *GCEPDLimits) Name() string { + return GCEPDName +} + +// Filter invoked at the filter extension point. +func (pl *GCEPDLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewGCEPD returns function that initializes a new plugin and returns it. +func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + + return &GCEPDLimits{ + predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD new file mode 100644 index 00000000000..9dd7642c2ee --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["pod_topology_spread.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["pod_topology_spread_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/scheduler/testing:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread.go new file mode 100644 index 00000000000..2e7f3616173 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podtopologyspread + +import ( + "context" + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// PodTopologySpread is a plugin that ensures pod's topologySpreadConstraints is satisfied. +type PodTopologySpread struct { + handle framework.FrameworkHandle +} + +var _ framework.FilterPlugin = &PodTopologySpread{} +var _ framework.ScorePlugin = &PodTopologySpread{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "PodTopologySpread" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *PodTopologySpread) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata) + if !ok { + return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState)) + } + _, reasons, err := predicates.EvenPodsSpreadPredicate(pod, meta, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the Score extension point. +// The "score" returned in this function is the matching number of pods on the `nodeName`, +// it is normalized later. +func (pl *PodTopologySpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := priorities.CalculateEvenPodsSpreadPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + meta := migration.PriorityMetadata(state) + err := priorities.CalculateEvenPodsSpreadPriorityReduce(pod, meta, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *PodTopologySpread) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &PodTopologySpread{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/BUILD new file mode 100644 index 00000000000..d4c9f2f8891 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["requested_to_capacity_ratio.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["requested_to_capacity_ratio_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio.go new file mode 100644 index 00000000000..35568aef442 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package requestedtocapacityratio + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// Name of this plugin. +const Name = "RequestedToCapacityRatio" + +// Args holds the args that are used to configure the plugin. 
+type Args struct { + FunctionShape priorities.FunctionShape + ResourceToWeightMap priorities.ResourceToWeightMap +} + +// New initializes a new plugin and returns it. +func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + args := &Args{} + if err := framework.DecodeInto(plArgs, args); err != nil { + return nil, err + } + p := priorities.RequestedToCapacityRatioResourceAllocationPriority(args.FunctionShape, args.ResourceToWeightMap) + return &RequestedToCapacityRatio{ + handle: handle, + prioritize: p.PriorityMap, + }, nil +} + +// RequestedToCapacityRatio is a score plugin that allow users to apply bin packing +// on core resources like CPU, Memory as well as extended resources like accelerators. +type RequestedToCapacityRatio struct { + handle framework.FrameworkHandle + prioritize priorities.PriorityMapFunction +} + +var _ framework.ScorePlugin = &RequestedToCapacityRatio{} + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *RequestedToCapacityRatio) Name() string { + return Name +} + +// Score invoked at the score extension point. +func (pl *RequestedToCapacityRatio) Score(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + // Note that RequestedToCapacityRatioPriority doesn't use priority metadata, hence passing nil here. + s, err := pl.prioritize(pod, nil, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *RequestedToCapacityRatio) ScoreExtensions() framework.ScoreExtensions { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD new file mode 100644 index 00000000000..ab27433dffb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["service_affinity.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["service_affinity_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go new file mode 100644 index 00000000000..cbe06beed92 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaffinity + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "ServiceAffinity" + +// Args holds the args that are used to configure the plugin. +type Args struct { + // Labels are homogeneous for pods that are scheduled to a node. + // (i.e. it returns true IFF this pod can be added to this node such that all other pods in + // the same service are running on nodes with the exact same values for Labels). + AffinityLabels []string `json:"labels,omitempty"` + // AntiAffinityLabelsPreference are the labels to consider for service anti affinity scoring. + AntiAffinityLabelsPreference []string `json:"antiAffinityLabelsPreference,omitempty"` +} + +// New initializes a new plugin and returns it. 
+func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + args := &Args{} + if err := framework.DecodeInto(plArgs, args); err != nil { + return nil, err + } + informerFactory := handle.SharedInformerFactory() + nodeInfoLister := handle.SnapshotSharedLister().NodeInfos() + podLister := handle.SnapshotSharedLister().Pods() + serviceLister := informerFactory.Core().V1().Services().Lister() + + fitPredicate, predicateMetadataProducer := predicates.NewServiceAffinityPredicate(nodeInfoLister, podLister, serviceLister, args.AffinityLabels) + // Once we generate the predicate we should also Register the Precomputation + predicates.RegisterPredicateMetadataProducer(predicates.CheckServiceAffinityPred, predicateMetadataProducer) + + priorityMapFunction, priorityReduceFunction := priorities.NewServiceAntiAffinityPriority(podLister, serviceLister, args.AntiAffinityLabelsPreference) + + return &ServiceAffinity{ + handle: handle, + predicate: fitPredicate, + priorityMapFunction: priorityMapFunction, + priorityReduceFunction: priorityReduceFunction, + }, nil +} + +// ServiceAffinity is a plugin that checks service affinity. +type ServiceAffinity struct { + handle framework.FrameworkHandle + predicate predicates.FitPredicate + priorityMapFunction priorities.PriorityMapFunction + priorityReduceFunction priorities.PriorityReduceFunction +} + +var _ framework.FilterPlugin = &ServiceAffinity{} +var _ framework.ScorePlugin = &ServiceAffinity{} + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *ServiceAffinity) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata) + if !ok { + return framework.NewStatus(framework.Error, "looking up Metadata") + } + _, reasons, err := pl.predicate(pod, meta, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the Score extension point. +func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + meta := migration.PriorityMetadata(state) + s, err := pl.priorityMapFunction(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + // Note that priorityReduceFunction doesn't use priority metadata, hence passing nil here. + err := pl.priorityReduceFunction(pod, nil, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. 
+func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions { + return pl +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/BUILD new file mode 100644 index 00000000000..474cf908e74 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["taint_toleration.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["taint_toleration_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go new file mode 100644 index 00000000000..1e85086dd3e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tainttoleration + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// TaintToleration is a plugin that checks if a pod tolerates a node's taints. 
+type TaintToleration struct { + handle framework.FrameworkHandle +} + +var _ framework.FilterPlugin = &TaintToleration{} +var _ framework.ScorePlugin = &TaintToleration{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "TaintToleration" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *TaintToleration) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // Note that PodToleratesNodeTaints doesn't use predicate metadata, hence passing nil here. + _, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// Score invoked at the Score extension point. +func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + meta := migration.PriorityMetadata(state) + s, err := priorities.ComputeTaintTolerationPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + // Note that ComputeTaintTolerationPriorityReduce doesn't use priority metadata, hence passing nil here. + err := priorities.ComputeTaintTolerationPriorityReduce(pod, nil, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *TaintToleration) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) { + return &TaintToleration{handle: h}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/BUILD new file mode 100644 index 00000000000..a108c6b984f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["volume_binding.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go new file mode 100644 index 00000000000..2738a31ff19 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumebinding + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" +) + +// VolumeBinding is a plugin that binds pod volumes in scheduling. +type VolumeBinding struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &VolumeBinding{} + +// Name is the name of the plugin used in Registry and configurations. +const Name = "VolumeBinding" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *VolumeBinding) Name() string { + return Name +} + +// Filter invoked at the filter extension point. 
+func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// NewFromVolumeBinder initializes a new plugin with volume binder and returns it. +func NewFromVolumeBinder(volumeBinder *volumebinder.VolumeBinder) framework.Plugin { + return &VolumeBinding{ + predicate: predicates.NewVolumeBindingPredicate(volumeBinder), + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/BUILD new file mode 100644 index 00000000000..4d7d651454e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["volume_restrictions.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["volume_restrictions_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go new file mode 100644 index 00000000000..a4dea04d57f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volumerestrictions + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// VolumeRestrictions is a plugin that checks volume restrictions. +type VolumeRestrictions struct{} + +var _ framework.FilterPlugin = &VolumeRestrictions{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "VolumeRestrictions" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *VolumeRestrictions) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed for NoDiskConflict + _, reasons, err := predicates.NoDiskConflict(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return &VolumeRestrictions{}, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/BUILD new file mode 100644 index 00000000000..8819aab7389 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["volume_zone.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["volume_zone_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go new file mode 100644 index 00000000000..666ade7ae4c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go @@ -0,0 
+1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumezone + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// VolumeZone is a plugin that checks volume zone. +type VolumeZone struct { + predicate predicates.FitPredicate +} + +var _ framework.FilterPlugin = &VolumeZone{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "VolumeZone" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *VolumeZone) Name() string { + return Name +} + +// Filter invoked at the filter extension point. +func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { + // metadata is not needed + _, reasons, err := pl.predicate(pod, nil, nodeInfo) + return migration.PredicateResultToFrameworkStatus(reasons, err) +} + +// New initializes a new plugin and returns it. +func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + scLister := informerFactory.Storage().V1().StorageClasses().Lister() + return &VolumeZone{ + predicate: predicates.NewVolumeZonePredicate(pvLister, pvcLister, scLister), + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/BUILD index 6554c53dd63..761b0df22ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/BUILD @@ -3,24 +3,30 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "context.go", + "cycle_state.go", "framework.go", "interface.go", + "metrics_recorder.go", "registry.go", "waiting_pods_map.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1", visibility = ["//visibility:public"], deps = [ - "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/listers:go_default_library", + "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", + "//staging/src/k8s.io/component-base/metrics:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], @@ -43,6 +49,7 @@ filegroup( go_test( name = "go_default_test", srcs = [ + "cycle_state_test.go", "framework_test.go", "interface_test.go", "registry_test.go", @@ -51,8 +58,13 @@ go_test( deps = [ "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config/scheme:go_default_library", + "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/context.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/context.go deleted file mode 100644 index 0dfe6a47335..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/context.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "errors" - "sync" -) - -const ( - // NotFound is the not found error message. - NotFound = "not found" -) - -// ContextData is a generic type for arbitrary data stored in PluginContext. -type ContextData interface{} - -// ContextKey is the type of keys stored in PluginContext. -type ContextKey string - -// PluginContext provides a mechanism for plugins to store and retrieve arbitrary data. -// ContextData stored by one plugin can be read, altered, or deleted by another plugin. -// PluginContext does not provide any data protection, as all plugins are assumed to be -// trusted. -type PluginContext struct { - mx sync.RWMutex - storage map[ContextKey]ContextData -} - -// NewPluginContext initializes a new PluginContext and returns its pointer. -func NewPluginContext() *PluginContext { - return &PluginContext{ - storage: make(map[ContextKey]ContextData), - } -} - -// Read retrieves data with the given "key" from PluginContext. If the key is not -// present an error is returned. -// This function is not thread safe. In multi-threaded code, lock should be -// acquired first. 
-func (c *PluginContext) Read(key ContextKey) (ContextData, error) { - if v, ok := c.storage[key]; ok { - return v, nil - } - return nil, errors.New(NotFound) -} - -// Write stores the given "val" in PluginContext with the given "key". -// This function is not thread safe. In multi-threaded code, lock should be -// acquired first. -func (c *PluginContext) Write(key ContextKey, val ContextData) { - c.storage[key] = val -} - -// Delete deletes data with the given key from PluginContext. -// This function is not thread safe. In multi-threaded code, lock should be -// acquired first. -func (c *PluginContext) Delete(key ContextKey) { - delete(c.storage, key) -} - -// Lock acquires PluginContext lock. -func (c *PluginContext) Lock() { - c.mx.Lock() -} - -// Unlock releases PluginContext lock. -func (c *PluginContext) Unlock() { - c.mx.Unlock() -} - -// RLock acquires PluginContext read lock. -func (c *PluginContext) RLock() { - c.mx.RLock() -} - -// RUnlock releases PluginContext read lock. -func (c *PluginContext) RUnlock() { - c.mx.RUnlock() -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/cycle_state.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/cycle_state.go new file mode 100644 index 00000000000..be62a81e4a9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/cycle_state.go @@ -0,0 +1,130 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "errors" + "sync" +) + +const ( + // NotFound is the not found error message. + NotFound = "not found" +) + +// StateData is a generic type for arbitrary data stored in CycleState. +type StateData interface { + // Clone is an interface to make a copy of StateData. For performance reasons, + // clone should make shallow copies for members (e.g., slices or maps) that are not + // impacted by PreFilter's optional AddPod/RemovePod methods. + Clone() StateData +} + +// StateKey is the type of keys stored in CycleState. +type StateKey string + +// CycleState provides a mechanism for plugins to store and retrieve arbitrary data. +// StateData stored by one plugin can be read, altered, or deleted by another plugin. +// CycleState does not provide any data protection, as all plugins are assumed to be +// trusted. +type CycleState struct { + mx sync.RWMutex + storage map[StateKey]StateData + // if recordFrameworkMetrics is true, framework metrics will be recorded for this cycle. + recordFrameworkMetrics bool +} + +// NewCycleState initializes a new CycleState and returns its pointer. +func NewCycleState() *CycleState { + return &CycleState{ + storage: make(map[StateKey]StateData), + } +} + +// ShouldRecordFrameworkMetrics returns whether framework metrics should be recorded. +func (c *CycleState) ShouldRecordFrameworkMetrics() bool { + if c == nil { + return false + } + return c.recordFrameworkMetrics +} + +// SetRecordFrameworkMetrics sets recordFrameworkMetrics to the given value. 
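// A sketch of the intended CycleState usage, written as if it sat alongside cycle_state.go
// in this package: a plugin defines a StateData type with Clone, writes it under a
// well-known key early in the cycle, and reads it back at a later extension point under
// the appropriate lock. All names here (preFilterStateKey, matchingPodsState, the helper
// functions) are hypothetical; this is an illustration, not vendored code.
const preFilterStateKey StateKey = "ExamplePlugin/preFilterState"

// matchingPodsState is a hypothetical per-cycle cache.
type matchingPodsState struct {
	count int
}

// Clone returns a copy; a shallow copy suffices because the struct holds no shared
// slices or maps.
func (s *matchingPodsState) Clone() StateData {
	c := *s
	return &c
}

// writeExampleState stores the cache for the current scheduling cycle.
func writeExampleState(cs *CycleState, count int) {
	cs.Lock()
	defer cs.Unlock()
	cs.Write(preFilterStateKey, &matchingPodsState{count: count})
}

// readExampleState retrieves the cache written earlier in the same cycle.
func readExampleState(cs *CycleState) (*matchingPodsState, error) {
	cs.RLock()
	defer cs.RUnlock()
	data, err := cs.Read(preFilterStateKey)
	if err != nil {
		return nil, err
	}
	s, ok := data.(*matchingPodsState)
	if !ok {
		return nil, errors.New("unexpected state type")
	}
	return s, nil
}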
+func (c *CycleState) SetRecordFrameworkMetrics(flag bool) { + if c == nil { + return + } + c.recordFrameworkMetrics = flag +} + +// Clone creates a copy of CycleState and returns its pointer. Clone returns +// nil if the context being cloned is nil. +func (c *CycleState) Clone() *CycleState { + if c == nil { + return nil + } + copy := NewCycleState() + for k, v := range c.storage { + copy.Write(k, v.Clone()) + } + return copy +} + +// Read retrieves data with the given "key" from CycleState. If the key is not +// present an error is returned. +// This function is not thread safe. In multi-threaded code, lock should be +// acquired first. +func (c *CycleState) Read(key StateKey) (StateData, error) { + if v, ok := c.storage[key]; ok { + return v, nil + } + return nil, errors.New(NotFound) +} + +// Write stores the given "val" in CycleState with the given "key". +// This function is not thread safe. In multi-threaded code, lock should be +// acquired first. +func (c *CycleState) Write(key StateKey, val StateData) { + c.storage[key] = val +} + +// Delete deletes data with the given key from CycleState. +// This function is not thread safe. In multi-threaded code, lock should be +// acquired first. +func (c *CycleState) Delete(key StateKey) { + delete(c.storage, key) +} + +// Lock acquires CycleState lock. +func (c *CycleState) Lock() { + c.mx.Lock() +} + +// Unlock releases CycleState lock. +func (c *CycleState) Unlock() { + c.mx.Unlock() +} + +// RLock acquires CycleState read lock. +func (c *CycleState) RLock() { + c.mx.RLock() +} + +// RUnlock releases CycleState read lock. +func (c *CycleState) RUnlock() { + c.mx.RUnlock() +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go index cb2aca2c18b..7fd263befb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go @@ -19,82 +19,183 @@ package v1alpha1 import ( "context" "fmt" + "reflect" "time" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/workqueue" "k8s.io/klog" "k8s.io/kubernetes/pkg/scheduler/apis/config" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" + "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) +const ( + // Specifies the maximum timeout a permit plugin can return. + maxTimeout time.Duration = 15 * time.Minute + preFilter = "PreFilter" + preFilterExtensionAddPod = "PreFilterExtensionAddPod" + preFilterExtensionRemovePod = "PreFilterExtensionRemovePod" + filter = "Filter" + postFilter = "PostFilter" + score = "Score" + scoreExtensionNormalize = "ScoreExtensionNormalize" + preBind = "PreBind" + bind = "Bind" + postBind = "PostBind" + reserve = "Reserve" + unreserve = "Unreserve" + permit = "Permit" +) + // framework is the component responsible for initializing and running scheduler // plugins. 
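// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a minimal sketch of how a plugin
// might use the CycleState API introduced above (Write/Read/Clone) to pass
// data between extension points in one scheduling cycle. The package, key and
// data type names below are hypothetical; only the framework-side identifiers
// come from the code in this change.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"fmt"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const exampleStateKey framework.StateKey = "example-plugin/precomputed"

// exampleStateData carries per-cycle data between extension points. Clone is
// required so that CycleState.Clone can copy the whole cycle state.
type exampleStateData struct {
	matchingNodes int
}

func (d *exampleStateData) Clone() framework.StateData {
	c := *d
	return &c
}

// writeState would typically run in PreFilter.
func writeState(state *framework.CycleState) {
	state.Write(exampleStateKey, &exampleStateData{matchingNodes: 3})
}

// readState would typically run in Filter or PostFilter; Read returns an
// error whose message is NotFound when the key is absent.
func readState(state *framework.CycleState) (int, error) {
	v, err := state.Read(exampleStateKey)
	if err != nil {
		return 0, err
	}
	d, ok := v.(*exampleStateData)
	if !ok {
		return 0, fmt.Errorf("unexpected state type %T", v)
	}
	return d.matchingNodes, nil
}
// ---------------------------------------------------------------------------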
type framework struct { - registry Registry - nodeInfoSnapshot *schedulernodeinfo.Snapshot - waitingPods *waitingPodsMap - pluginNameToWeightMap map[string]int - queueSortPlugins []QueueSortPlugin - preFilterPlugins []PreFilterPlugin - filterPlugins []FilterPlugin - postFilterPlugins []PostFilterPlugin - scorePlugins []ScorePlugin - scoreWithNormalizePlugins []ScoreWithNormalizePlugin - reservePlugins []ReservePlugin - preBindPlugins []PreBindPlugin - bindPlugins []BindPlugin - postBindPlugins []PostBindPlugin - unreservePlugins []UnreservePlugin - permitPlugins []PermitPlugin + registry Registry + snapshotSharedLister schedulerlisters.SharedLister + waitingPods *waitingPodsMap + pluginNameToWeightMap map[string]int + queueSortPlugins []QueueSortPlugin + preFilterPlugins []PreFilterPlugin + filterPlugins []FilterPlugin + postFilterPlugins []PostFilterPlugin + scorePlugins []ScorePlugin + reservePlugins []ReservePlugin + preBindPlugins []PreBindPlugin + bindPlugins []BindPlugin + postBindPlugins []PostBindPlugin + unreservePlugins []UnreservePlugin + permitPlugins []PermitPlugin + + clientSet clientset.Interface + informerFactory informers.SharedInformerFactory + + metricsRecorder *metricsRecorder } -const ( - // Specifies the maximum timeout a permit plugin can return. - maxTimeout time.Duration = 15 * time.Minute -) +// extensionPoint encapsulates desired and applied set of plugins at a specific extension +// point. This is used to simplify iterating over all extension points supported by the +// framework. +type extensionPoint struct { + // the set of plugins to be configured at this extension point. + plugins *config.PluginSet + // a pointer to the slice storing plugins implementations that will run at this + // extenstion point. + slicePtr interface{} +} + +func (f *framework) getExtensionPoints(plugins *config.Plugins) []extensionPoint { + return []extensionPoint{ + {plugins.PreFilter, &f.preFilterPlugins}, + {plugins.Filter, &f.filterPlugins}, + {plugins.Reserve, &f.reservePlugins}, + {plugins.PostFilter, &f.postFilterPlugins}, + {plugins.Score, &f.scorePlugins}, + {plugins.PreBind, &f.preBindPlugins}, + {plugins.Bind, &f.bindPlugins}, + {plugins.PostBind, &f.postBindPlugins}, + {plugins.Unreserve, &f.unreservePlugins}, + {plugins.Permit, &f.permitPlugins}, + {plugins.QueueSort, &f.queueSortPlugins}, + } +} + +type frameworkOptions struct { + clientSet clientset.Interface + informerFactory informers.SharedInformerFactory + snapshotSharedLister schedulerlisters.SharedLister + metricsRecorder *metricsRecorder +} + +// Option for the framework. +type Option func(*frameworkOptions) + +// WithClientSet sets clientSet for the scheduling framework. +func WithClientSet(clientSet clientset.Interface) Option { + return func(o *frameworkOptions) { + o.clientSet = clientSet + } +} + +// WithInformerFactory sets informer factory for the scheduling framework. +func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option { + return func(o *frameworkOptions) { + o.informerFactory = informerFactory + } +} + +// WithSnapshotSharedLister sets the SharedLister of the snapshot. +func WithSnapshotSharedLister(snapshotSharedLister schedulerlisters.SharedLister) Option { + return func(o *frameworkOptions) { + o.snapshotSharedLister = snapshotSharedLister + } +} + +// withMetricsRecorder is only used in tests. 
+func withMetricsRecorder(recorder *metricsRecorder) Option { + return func(o *frameworkOptions) { + o.metricsRecorder = recorder + } +} -var _ = Framework(&framework{}) +var defaultFrameworkOptions = frameworkOptions{ + metricsRecorder: newMetricsRecorder(1000, time.Second), +} + +var _ Framework = &framework{} // NewFramework initializes plugins given the configuration and the registry. -func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfig) (Framework, error) { +func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfig, opts ...Option) (Framework, error) { + options := defaultFrameworkOptions + for _, opt := range opts { + opt(&options) + } + f := &framework{ registry: r, - nodeInfoSnapshot: schedulernodeinfo.NewSnapshot(), + snapshotSharedLister: options.snapshotSharedLister, pluginNameToWeightMap: make(map[string]int), waitingPods: newWaitingPodsMap(), + clientSet: options.clientSet, + informerFactory: options.informerFactory, + metricsRecorder: options.metricsRecorder, } if plugins == nil { return f, nil } // get needed plugins from config - pg := pluginsNeeded(plugins) + pg := f.pluginsNeeded(plugins) if len(pg) == 0 { return f, nil } - pluginConfig := pluginNameToConfig(args) + pluginConfig := make(map[string]*runtime.Unknown, 0) + for i := range args { + pluginConfig[args[i].Name] = &args[i].Args + } + pluginsMap := make(map[string]Plugin) for name, factory := range r { - // initialize only needed plugins + // initialize only needed plugins. if _, ok := pg[name]; !ok { continue } - // find the config args of a plugin - pc := pluginConfig[name] - - p, err := factory(pc, f) + p, err := factory(pluginConfig[name], f) if err != nil { return nil, fmt.Errorf("error initializing plugin %q: %v", name, err) } pluginsMap[name] = p - // A weight of zero is not permitted, plugins can be disabled explicitly + // a weight of zero is not permitted, plugins can be disabled explicitly // when configured. f.pluginNameToWeightMap[name] = int(pg[name].Weight) if f.pluginNameToWeightMap[name] == 0 { @@ -102,175 +203,55 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi } } - if plugins.PreFilter != nil { - for _, pf := range plugins.PreFilter.Enabled { - if pg, ok := pluginsMap[pf.Name]; ok { - p, ok := pg.(PreFilterPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend prefilter plugin", pf.Name) - } - f.preFilterPlugins = append(f.preFilterPlugins, p) - } else { - return nil, fmt.Errorf("prefilter plugin %q does not exist", pf.Name) - } - } - } - - if plugins.Filter != nil { - for _, r := range plugins.Filter.Enabled { - if pg, ok := pluginsMap[r.Name]; ok { - p, ok := pg.(FilterPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend filter plugin", r.Name) - } - f.filterPlugins = append(f.filterPlugins, p) - } else { - return nil, fmt.Errorf("filter plugin %q does not exist", r.Name) - } + for _, e := range f.getExtensionPoints(plugins) { + if err := updatePluginList(e.slicePtr, e.plugins, pluginsMap); err != nil { + return nil, err } } - if plugins.Score != nil { - for _, sc := range plugins.Score.Enabled { - if pg, ok := pluginsMap[sc.Name]; ok { - // First, make sure the plugin implements ScorePlugin interface. 
- p, ok := pg.(ScorePlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend score plugin", sc.Name) - } - if f.pluginNameToWeightMap[p.Name()] == 0 { - return nil, fmt.Errorf("score plugin %q is not configured with weight", p.Name()) - } - f.scorePlugins = append(f.scorePlugins, p) - - // Next, if the plugin also implements ScoreWithNormalizePlugin interface, - // add it to the normalizeScore plugin list. - np, ok := pg.(ScoreWithNormalizePlugin) - if ok { - f.scoreWithNormalizePlugins = append(f.scoreWithNormalizePlugins, np) - } - } else { - return nil, fmt.Errorf("score plugin %q does not exist", sc.Name) - } + // Verifying the score weights again since Plugin.Name() could return a different + // value from the one used in the configuration. + for _, scorePlugin := range f.scorePlugins { + if f.pluginNameToWeightMap[scorePlugin.Name()] == 0 { + return nil, fmt.Errorf("score plugin %q is not configured with weight", scorePlugin.Name()) } } - if plugins.Reserve != nil { - for _, r := range plugins.Reserve.Enabled { - if pg, ok := pluginsMap[r.Name]; ok { - p, ok := pg.(ReservePlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend reserve plugin", r.Name) - } - f.reservePlugins = append(f.reservePlugins, p) - } else { - return nil, fmt.Errorf("reserve plugin %q does not exist", r.Name) - } - } + if len(f.queueSortPlugins) > 1 { + return nil, fmt.Errorf("only one queue sort plugin can be enabled") } - if plugins.PostFilter != nil { - for _, r := range plugins.PostFilter.Enabled { - if pg, ok := pluginsMap[r.Name]; ok { - p, ok := pg.(PostFilterPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend post-filter plugin", r.Name) - } - f.postFilterPlugins = append(f.postFilterPlugins, p) - } else { - return nil, fmt.Errorf("post-filter plugin %q does not exist", r.Name) - } - } - } + return f, nil +} - if plugins.PreBind != nil { - for _, pb := range plugins.PreBind.Enabled { - if pg, ok := pluginsMap[pb.Name]; ok { - p, ok := pg.(PreBindPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend prebind plugin", pb.Name) - } - f.preBindPlugins = append(f.preBindPlugins, p) - } else { - return nil, fmt.Errorf("prebind plugin %q does not exist", pb.Name) - } - } +func updatePluginList(pluginList interface{}, pluginSet *config.PluginSet, pluginsMap map[string]Plugin) error { + if pluginSet == nil { + return nil } - if plugins.Bind != nil { - for _, pb := range plugins.Bind.Enabled { - if pg, ok := pluginsMap[pb.Name]; ok { - p, ok := pg.(BindPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend bind plugin", pb.Name) - } - f.bindPlugins = append(f.bindPlugins, p) - } else { - return nil, fmt.Errorf("bind plugin %q does not exist", pb.Name) - } + plugins := reflect.ValueOf(pluginList).Elem() + pluginType := plugins.Type().Elem() + set := sets.NewString() + for _, ep := range pluginSet.Enabled { + pg, ok := pluginsMap[ep.Name] + if !ok { + return fmt.Errorf("%s %q does not exist", pluginType.Name(), ep.Name) } - } - if plugins.PostBind != nil { - for _, pb := range plugins.PostBind.Enabled { - if pg, ok := pluginsMap[pb.Name]; ok { - p, ok := pg.(PostBindPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend postbind plugin", pb.Name) - } - f.postBindPlugins = append(f.postBindPlugins, p) - } else { - return nil, fmt.Errorf("postbind plugin %q does not exist", pb.Name) - } + if !reflect.TypeOf(pg).Implements(pluginType) { + return fmt.Errorf("plugin %q does not extend %s plugin", ep.Name, pluginType.Name()) } 
- } - if plugins.Unreserve != nil { - for _, ur := range plugins.Unreserve.Enabled { - if pg, ok := pluginsMap[ur.Name]; ok { - p, ok := pg.(UnreservePlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend unreserve plugin", ur.Name) - } - f.unreservePlugins = append(f.unreservePlugins, p) - } else { - return nil, fmt.Errorf("unreserve plugin %q does not exist", ur.Name) - } + if set.Has(ep.Name) { + return fmt.Errorf("plugin %q already registered as %q", ep.Name, pluginType.Name()) } - } - if plugins.Permit != nil { - for _, pr := range plugins.Permit.Enabled { - if pg, ok := pluginsMap[pr.Name]; ok { - p, ok := pg.(PermitPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend permit plugin", pr.Name) - } - f.permitPlugins = append(f.permitPlugins, p) - } else { - return nil, fmt.Errorf("permit plugin %q does not exist", pr.Name) - } - } - } + set.Insert(ep.Name) - if plugins.QueueSort != nil { - for _, qs := range plugins.QueueSort.Enabled { - if pg, ok := pluginsMap[qs.Name]; ok { - p, ok := pg.(QueueSortPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q does not extend queue sort plugin", qs.Name) - } - f.queueSortPlugins = append(f.queueSortPlugins, p) - if len(f.queueSortPlugins) > 1 { - return nil, fmt.Errorf("only one queue sort plugin can be enabled") - } - } else { - return nil, fmt.Errorf("queue sort plugin %q does not exist", qs.Name) - } - } + newPlugins := reflect.Append(plugins, reflect.ValueOf(pg)) + plugins.Set(newPlugins) } - - return f, nil + return nil } // QueueSortFunc returns the function to sort pods in scheduling queue @@ -287,10 +268,15 @@ func (f *framework) QueueSortFunc() LessFunc { // *Status and its code is set to non-success if any of the plugins returns // anything but Success. If a non-success status is returned, then the scheduling // cycle is aborted. -func (f *framework) RunPreFilterPlugins( - pc *PluginContext, pod *v1.Pod) *Status { +func (f *framework) RunPreFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(preFilter, status, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.preFilterPlugins { - status := pl.PreFilter(pc, pod) + status = f.runPreFilterPlugin(ctx, pl, state, pod) if !status.IsSuccess() { if status.IsUnschedulable() { msg := fmt.Sprintf("rejected by %q at prefilter: %v", pl.Name(), status.Message()) @@ -306,14 +292,118 @@ func (f *framework) RunPreFilterPlugins( return nil } +func (f *framework) runPreFilterPlugin(ctx context.Context, pl PreFilterPlugin, state *CycleState, pod *v1.Pod) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.PreFilter(ctx, state, pod) + } + startTime := time.Now() + status := pl.PreFilter(ctx, state, pod) + f.metricsRecorder.observePluginDurationAsync(preFilter, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + +// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured +// PreFilter plugins. It returns directly if any of the plugins return any +// status other than Success. 
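// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a self-contained sketch of the
// reflection technique that updatePluginList (above) uses to replace the old
// per-extension-point registration blocks — appending a value to a
// *[]SomeInterface whose element type is only known at run time. The
// greeter/english names are hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"reflect"
)

type greeter interface{ Greet() string }

type english struct{}

func (english) Greet() string { return "hello" }

// appendIfImplements mirrors the shape of updatePluginList: slicePtr must be
// a pointer to a slice of some interface type; v is appended only if its type
// implements that interface.
func appendIfImplements(slicePtr interface{}, v interface{}) error {
	slice := reflect.ValueOf(slicePtr).Elem()
	ifaceType := slice.Type().Elem()
	if !reflect.TypeOf(v).Implements(ifaceType) {
		return fmt.Errorf("%T does not implement %s", v, ifaceType.Name())
	}
	slice.Set(reflect.Append(slice, reflect.ValueOf(v)))
	return nil
}

func main() {
	var greeters []greeter
	if err := appendIfImplements(&greeters, english{}); err != nil {
		panic(err)
	}
	fmt.Println(greeters[0].Greet()) // prints "hello"
}
// ---------------------------------------------------------------------------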
+func (f *framework) RunPreFilterExtensionAddPod( + ctx context.Context, + state *CycleState, + podToSchedule *v1.Pod, + podToAdd *v1.Pod, + nodeInfo *schedulernodeinfo.NodeInfo, +) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(preFilterExtensionAddPod, status, metrics.SinceInSeconds(startTime)) + }() + } + for _, pl := range f.preFilterPlugins { + if pl.PreFilterExtensions() == nil { + continue + } + status = f.runPreFilterExtensionAddPod(ctx, pl, state, podToSchedule, podToAdd, nodeInfo) + if !status.IsSuccess() { + msg := fmt.Sprintf("error while running AddPod for plugin %q while scheduling pod %q: %v", + pl.Name(), podToSchedule.Name, status.Message()) + klog.Error(msg) + return NewStatus(Error, msg) + } + } + + return nil +} + +func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podToAdd, nodeInfo) + } + startTime := time.Now() + status := pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podToAdd, nodeInfo) + f.metricsRecorder.observePluginDurationAsync(preFilterExtensionAddPod, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + +// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured +// PreFilter plugins. It returns directly if any of the plugins return any +// status other than Success. +func (f *framework) RunPreFilterExtensionRemovePod( + ctx context.Context, + state *CycleState, + podToSchedule *v1.Pod, + podToRemove *v1.Pod, + nodeInfo *schedulernodeinfo.NodeInfo, +) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(preFilterExtensionRemovePod, status, metrics.SinceInSeconds(startTime)) + }() + } + for _, pl := range f.preFilterPlugins { + if pl.PreFilterExtensions() == nil { + continue + } + status = f.runPreFilterExtensionRemovePod(ctx, pl, state, podToSchedule, podToRemove, nodeInfo) + if !status.IsSuccess() { + msg := fmt.Sprintf("error while running RemovePod for plugin %q while scheduling pod %q: %v", + pl.Name(), podToSchedule.Name, status.Message()) + klog.Error(msg) + return NewStatus(Error, msg) + } + } + + return nil +} + +func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podToAdd, nodeInfo) + } + startTime := time.Now() + status := pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podToAdd, nodeInfo) + f.metricsRecorder.observePluginDurationAsync(preFilterExtensionRemovePod, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunFilterPlugins runs the set of configured Filter plugins for pod on // the given node. If any of these plugins doesn't return "Success", the // given node is not suitable for running pod. // Meanwhile, the failure message and status are set for the given node. 
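// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a minimal FilterPlugin under the
// reworked signature, which now receives a NodeInfo directly instead of a
// node name. The plugin (nodeNotEmpty) is hypothetical; the interface comes
// from this change, and NodeInfo.Pods() is assumed from pkg/scheduler/nodeinfo.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

type nodeNotEmpty struct{}

var _ framework.FilterPlugin = &nodeNotEmpty{}

func (pl *nodeNotEmpty) Name() string { return "NodeNotEmpty" }

// Filter rejects nodes that already run any pods. It inspects the passed
// nodeInfo rather than looking the node up elsewhere, since during preemption
// the framework may hand plugins a modified copy of the node's info.
func (pl *nodeNotEmpty) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
	if len(nodeInfo.Pods()) > 0 {
		return framework.NewStatus(framework.Unschedulable, "node already has pods")
	}
	return nil
}
// ---------------------------------------------------------------------------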
-func (f *framework) RunFilterPlugins(pc *PluginContext, - pod *v1.Pod, nodeName string) *Status { +func (f *framework) RunFilterPlugins( + ctx context.Context, + state *CycleState, + pod *v1.Pod, + nodeInfo *schedulernodeinfo.NodeInfo, +) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(filter, status, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.filterPlugins { - status := pl.Filter(pc, pod, nodeName) + status = f.runFilterPlugin(ctx, pl, state, pod, nodeInfo) if !status.IsSuccess() { if !status.IsUnschedulable() { errMsg := fmt.Sprintf("error while running %q filter plugin for pod %q: %v", @@ -328,17 +418,34 @@ func (f *framework) RunFilterPlugins(pc *PluginContext, return nil } +func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.Filter(ctx, state, pod, nodeInfo) + } + startTime := time.Now() + status := pl.Filter(ctx, state, pod, nodeInfo) + f.metricsRecorder.observePluginDurationAsync(filter, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunPostFilterPlugins runs the set of configured post-filter plugins. If any // of these plugins returns any status other than "Success", the given node is // rejected. The filteredNodeStatuses is the set of filtered nodes and their statuses. func (f *framework) RunPostFilterPlugins( - pc *PluginContext, + ctx context.Context, + state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap, -) *Status { +) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(postFilter, status, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.postFilterPlugins { - status := pl.PostFilter(pc, pod, nodes, filteredNodesStatuses) + status = f.runPostFilterPlugin(ctx, pl, state, pod, nodes, filteredNodesStatuses) if !status.IsSuccess() { msg := fmt.Sprintf("error while running %q postfilter plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) klog.Error(msg) @@ -349,30 +456,46 @@ func (f *framework) RunPostFilterPlugins( return nil } +func (f *framework) runPostFilterPlugin(ctx context.Context, pl PostFilterPlugin, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.PostFilter(ctx, state, pod, nodes, filteredNodesStatuses) + } + startTime := time.Now() + status := pl.PostFilter(ctx, state, pod, nodes, filteredNodesStatuses) + f.metricsRecorder.observePluginDurationAsync(postFilter, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunScorePlugins runs the set of configured scoring plugins. It returns a list that // stores for each scoring plugin name the corresponding NodeScoreList(s). // It also returns *Status, which is set to non-success if any of the plugins returns // a non-success status. 
-func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1.Node) (PluginToNodeScores, *Status) { +func (f *framework) RunScorePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node) (ps PluginToNodeScores, status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(score, status, metrics.SinceInSeconds(startTime)) + }() + } pluginToNodeScores := make(PluginToNodeScores, len(f.scorePlugins)) for _, pl := range f.scorePlugins { pluginToNodeScores[pl.Name()] = make(NodeScoreList, len(nodes)) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) errCh := schedutil.NewErrorChannel() // Run Score method for each node in parallel. workqueue.ParallelizeUntil(ctx, 16, len(nodes), func(index int) { for _, pl := range f.scorePlugins { nodeName := nodes[index].Name - score, status := pl.Score(pc, pod, nodeName) + s, status := f.runScorePlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { errCh.SendErrorWithCancel(fmt.Errorf(status.Message()), cancel) return } pluginToNodeScores[pl.Name()][index] = NodeScore{ Name: nodeName, - Score: score, + Score: int64(s), } } }) @@ -382,11 +505,14 @@ func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1. return nil, NewStatus(Error, msg) } - // Run NormalizeScore method for each ScoreWithNormalizePlugin in parallel. - workqueue.ParallelizeUntil(ctx, 16, len(f.scoreWithNormalizePlugins), func(index int) { - pl := f.scoreWithNormalizePlugins[index] + // Run NormalizeScore method for each ScorePlugin in parallel. + workqueue.ParallelizeUntil(ctx, 16, len(f.scorePlugins), func(index int) { + pl := f.scorePlugins[index] nodeScoreList := pluginToNodeScores[pl.Name()] - status := pl.NormalizeScore(pc, pod, nodeScoreList) + if pl.ScoreExtensions() == nil { + return + } + status := f.runScoreExtension(ctx, pl, state, pod, nodeScoreList) if !status.IsSuccess() { err := fmt.Errorf("normalize score plugin %q failed with error %v", pl.Name(), status.Message()) errCh.SendErrorWithCancel(err, cancel) @@ -408,12 +534,12 @@ func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1. for i, nodeScore := range nodeScoreList { // return error if score plugin returns invalid score. - if nodeScore.Score > MaxNodeScore || nodeScore.Score < MinNodeScore { + if nodeScore.Score > int64(MaxNodeScore) || nodeScore.Score < int64(MinNodeScore) { err := fmt.Errorf("score plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), nodeScore.Score, MinNodeScore, MaxNodeScore) errCh.SendErrorWithCancel(err, cancel) return } - nodeScoreList[i].Score = nodeScore.Score * weight + nodeScoreList[i].Score = nodeScore.Score * int64(weight) } }) if err := errCh.ReceiveError(); err != nil { @@ -425,19 +551,39 @@ func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1. 
return pluginToNodeScores, nil } +func (f *framework) runScorePlugin(ctx context.Context, pl ScorePlugin, state *CycleState, pod *v1.Pod, nodeName string) (int64, *Status) { + if !state.ShouldRecordFrameworkMetrics() { + return pl.Score(ctx, state, pod, nodeName) + } + startTime := time.Now() + s, status := pl.Score(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(score, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return s, status +} + +func (f *framework) runScoreExtension(ctx context.Context, pl ScorePlugin, state *CycleState, pod *v1.Pod, nodeScoreList NodeScoreList) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList) + } + startTime := time.Now() + status := pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList) + f.metricsRecorder.observePluginDurationAsync(scoreExtensionNormalize, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunPreBindPlugins runs the set of configured prebind plugins. It returns a // failure (bool) if any of the plugins returns an error. It also returns an // error containing the rejection message or the error occurred in the plugin. -func (f *framework) RunPreBindPlugins( - pc *PluginContext, pod *v1.Pod, nodeName string) *Status { +func (f *framework) RunPreBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(preBind, status, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.preBindPlugins { - status := pl.PreBind(pc, pod, nodeName) + status = f.runPreBindPlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { - if status.IsUnschedulable() { - msg := fmt.Sprintf("rejected by %q at prebind: %v", pl.Name(), status.Message()) - klog.V(4).Infof(msg) - return NewStatus(status.Code(), msg) - } msg := fmt.Sprintf("error while running %q prebind plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) klog.Error(msg) return NewStatus(Error, msg) @@ -446,14 +592,29 @@ func (f *framework) RunPreBindPlugins( return nil } +func (f *framework) runPreBindPlugin(ctx context.Context, pl PreBindPlugin, state *CycleState, pod *v1.Pod, nodeName string) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.PreBind(ctx, state, pod, nodeName) + } + startTime := time.Now() + status := pl.PreBind(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(preBind, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunBindPlugins runs the set of configured bind plugins until one returns a non `Skip` status. 
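// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a minimal BindPlugin illustrating
// the Skip protocol used by RunBindPlugins below — a bind plugin that does
// not want to handle a pod returns a Skip status so the next bind plugin (or
// the default binder) gets a chance. The plugin and the "example.io/managed"
// label are hypothetical.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type labelledBinder struct{}

var _ framework.BindPlugin = &labelledBinder{}

func (b *labelledBinder) Name() string { return "LabelledBinder" }

func (b *labelledBinder) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
	if p.Labels["example.io/managed"] != "true" {
		// Not ours: let the framework fall through to the next bind plugin.
		return framework.NewStatus(framework.Skip, "")
	}
	// A real plugin would issue the Binding here (for example via a client
	// obtained from FrameworkHandle); this sketch just reports success.
	return nil
}
// ---------------------------------------------------------------------------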
-func (f *framework) RunBindPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status { +func (f *framework) RunBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(bind, status, metrics.SinceInSeconds(startTime)) + }() + } if len(f.bindPlugins) == 0 { return NewStatus(Skip, "") } - var status *Status for _, bp := range f.bindPlugins { - status = bp.Bind(pc, pod, nodeName) + status = f.runBindPlugin(ctx, bp, state, pod, nodeName) if status != nil && status.Code() == Skip { continue } @@ -467,21 +628,51 @@ func (f *framework) RunBindPlugins(pc *PluginContext, pod *v1.Pod, nodeName stri return status } +func (f *framework) runBindPlugin(ctx context.Context, bp BindPlugin, state *CycleState, pod *v1.Pod, nodeName string) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return bp.Bind(ctx, state, pod, nodeName) + } + startTime := time.Now() + status := bp.Bind(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(bind, bp.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunPostBindPlugins runs the set of configured postbind plugins. -func (f *framework) RunPostBindPlugins( - pc *PluginContext, pod *v1.Pod, nodeName string) { +func (f *framework) RunPostBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(postBind, nil, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.postBindPlugins { - pl.PostBind(pc, pod, nodeName) + f.runPostBindPlugin(ctx, pl, state, pod, nodeName) + } +} + +func (f *framework) runPostBindPlugin(ctx context.Context, pl PostBindPlugin, state *CycleState, pod *v1.Pod, nodeName string) { + if !state.ShouldRecordFrameworkMetrics() { + pl.PostBind(ctx, state, pod, nodeName) + return } + startTime := time.Now() + pl.PostBind(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(postBind, pl.Name(), nil, metrics.SinceInSeconds(startTime)) } // RunReservePlugins runs the set of configured reserve plugins. If any of these // plugins returns an error, it does not continue running the remaining ones and // returns the error. In such case, pod will not be scheduled. 
-func (f *framework) RunReservePlugins( - pc *PluginContext, pod *v1.Pod, nodeName string) *Status { +func (f *framework) RunReservePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(reserve, status, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.reservePlugins { - status := pl.Reserve(pc, pod, nodeName) + status = f.runReservePlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { msg := fmt.Sprintf("error while running %q reserve plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) klog.Error(msg) @@ -491,12 +682,37 @@ func (f *framework) RunReservePlugins( return nil } +func (f *framework) runReservePlugin(ctx context.Context, pl ReservePlugin, state *CycleState, pod *v1.Pod, nodeName string) *Status { + if !state.ShouldRecordFrameworkMetrics() { + return pl.Reserve(ctx, state, pod, nodeName) + } + startTime := time.Now() + status := pl.Reserve(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(reserve, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status +} + // RunUnreservePlugins runs the set of configured unreserve plugins. -func (f *framework) RunUnreservePlugins( - pc *PluginContext, pod *v1.Pod, nodeName string) { +func (f *framework) RunUnreservePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(unreserve, nil, metrics.SinceInSeconds(startTime)) + }() + } for _, pl := range f.unreservePlugins { - pl.Unreserve(pc, pod, nodeName) + f.runUnreservePlugin(ctx, pl, state, pod, nodeName) + } +} + +func (f *framework) runUnreservePlugin(ctx context.Context, pl UnreservePlugin, state *CycleState, pod *v1.Pod, nodeName string) { + if !state.ShouldRecordFrameworkMetrics() { + pl.Unreserve(ctx, state, pod, nodeName) + return } + startTime := time.Now() + pl.Unreserve(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(unreserve, pl.Name(), nil, metrics.SinceInSeconds(startTime)) } // RunPermitPlugins runs the set of configured permit plugins. If any of these @@ -506,12 +722,17 @@ func (f *framework) RunUnreservePlugins( // returned by the plugin, if the time expires, then it will return an error. // Note that if multiple plugins asked to wait, then we wait for the minimum // timeout duration. 
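// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a minimal PermitPlugin showing the
// "Wait" flow handled by RunPermitPlugins below. Returning a Wait status with
// a timeout parks the pod as a WaitingPod; some other component holding the
// FrameworkHandle can later call GetWaitingPod(uid).Allow("GatedPermit") or
// Reject(...) to release it, or the returned timeout expires. The plugin and
// its annotation-based gate are hypothetical.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type gatedPermit struct{}

var _ framework.PermitPlugin = &gatedPermit{}

func (g *gatedPermit) Name() string { return "GatedPermit" }

func (g *gatedPermit) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
	if p.Annotations["example.io/hold"] == "true" {
		// Ask the framework to hold the pod for up to 30 seconds.
		return framework.NewStatus(framework.Wait, "waiting for external approval"), 30 * time.Second
	}
	return nil, 0
}
// ---------------------------------------------------------------------------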
-func (f *framework) RunPermitPlugins( - pc *PluginContext, pod *v1.Pod, nodeName string) *Status { - timeout := maxTimeout +func (f *framework) RunPermitPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) (status *Status) { + if state.ShouldRecordFrameworkMetrics() { + startTime := time.Now() + defer func() { + f.metricsRecorder.observeExtensionPointDurationAsync(permit, status, metrics.SinceInSeconds(startTime)) + }() + } + pluginsWaitTime := make(map[string]time.Duration) statusCode := Success for _, pl := range f.permitPlugins { - status, d := pl.Permit(pc, pod, nodeName) + status, timeout := f.runPermitPlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { if status.IsUnschedulable() { msg := fmt.Sprintf("rejected by %q at permit: %v", pl.Name(), status.Message()) @@ -519,10 +740,11 @@ func (f *framework) RunPermitPlugins( return NewStatus(status.Code(), msg) } if status.Code() == Wait { - // Use the minimum timeout duration. - if timeout > d { - timeout = d + // Not allowed to be greater than maxTimeout. + if timeout > maxTimeout { + timeout = maxTimeout } + pluginsWaitTime[pl.Name()] = timeout statusCode = Wait } else { msg := fmt.Sprintf("error while running %q permit plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) @@ -535,39 +757,44 @@ func (f *framework) RunPermitPlugins( // We now wait for the minimum duration if at least one plugin asked to // wait (and no plugin rejected the pod) if statusCode == Wait { - w := newWaitingPod(pod) + startTime := time.Now() + w := newWaitingPod(pod, pluginsWaitTime) f.waitingPods.add(w) defer f.waitingPods.remove(pod.UID) - timer := time.NewTimer(timeout) - klog.V(4).Infof("waiting for %v for pod %q at permit", timeout, pod.Name) - select { - case <-timer.C: - msg := fmt.Sprintf("pod %q rejected due to timeout after waiting %v at permit", pod.Name, timeout) - klog.V(4).Infof(msg) - return NewStatus(Unschedulable, msg) - case s := <-w.s: - if !s.IsSuccess() { - if s.IsUnschedulable() { - msg := fmt.Sprintf("rejected while waiting at permit: %v", s.Message()) - klog.V(4).Infof(msg) - return NewStatus(s.Code(), msg) - } - msg := fmt.Sprintf("error received while waiting at permit for pod %q: %v", pod.Name, s.Message()) - klog.Error(msg) - return NewStatus(Error, msg) + klog.V(4).Infof("waiting for pod %q at permit", pod.Name) + s := <-w.s + metrics.PermitWaitDuration.WithLabelValues(s.Code().String()).Observe(metrics.SinceInSeconds(startTime)) + if !s.IsSuccess() { + if s.IsUnschedulable() { + msg := fmt.Sprintf("pod %q rejected while waiting at permit: %v", pod.Name, s.Message()) + klog.V(4).Infof(msg) + return NewStatus(s.Code(), msg) } + msg := fmt.Sprintf("error received while waiting at permit for pod %q: %v", pod.Name, s.Message()) + klog.Error(msg) + return NewStatus(Error, msg) } } return nil } -// NodeInfoSnapshot returns the latest NodeInfo snapshot. The snapshot -// is taken at the beginning of a scheduling cycle and remains unchanged until a -// pod finishes "Reserve". There is no guarantee that the information remains -// unchanged after "Reserve". 
-func (f *framework) NodeInfoSnapshot() *schedulernodeinfo.Snapshot { - return f.nodeInfoSnapshot +func (f *framework) runPermitPlugin(ctx context.Context, pl PermitPlugin, state *CycleState, pod *v1.Pod, nodeName string) (*Status, time.Duration) { + if !state.ShouldRecordFrameworkMetrics() { + return pl.Permit(ctx, state, pod, nodeName) + } + startTime := time.Now() + status, timeout := pl.Permit(ctx, state, pod, nodeName) + f.metricsRecorder.observePluginDurationAsync(permit, pl.Name(), status, metrics.SinceInSeconds(startTime)) + return status, timeout +} + +// SnapshotSharedLister returns the scheduler's SharedLister of the latest NodeInfo +// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains +// unchanged until a pod finishes "Reserve". There is no guarantee that the information +// remains unchanged after "Reserve". +func (f *framework) SnapshotSharedLister() schedulerlisters.SharedLister { + return f.snapshotSharedLister } // IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. @@ -580,16 +807,64 @@ func (f *framework) GetWaitingPod(uid types.UID) WaitingPod { return f.waitingPods.get(uid) } -func pluginNameToConfig(args []config.PluginConfig) map[string]*runtime.Unknown { - pc := make(map[string]*runtime.Unknown, 0) - for _, p := range args { - pc[p.Name] = &p.Args +// RejectWaitingPod rejects a WaitingPod given its UID. +func (f *framework) RejectWaitingPod(uid types.UID) { + waitingPod := f.waitingPods.get(uid) + if waitingPod != nil { + waitingPod.Reject("removed") } - return pc } -func pluginsNeeded(plugins *config.Plugins) map[string]config.Plugin { - pgMap := make(map[string]config.Plugin, 0) +// HasFilterPlugins returns true if at least one filter plugin is defined. +func (f *framework) HasFilterPlugins() bool { + return len(f.filterPlugins) > 0 +} + +// HasScorePlugins returns true if at least one score plugin is defined. +func (f *framework) HasScorePlugins() bool { + return len(f.scorePlugins) > 0 +} + +// ListPlugins returns a map of extension point name to plugin names configured at each extension +// point. Returns nil if no plugins where configred. +func (f *framework) ListPlugins() map[string][]config.Plugin { + m := make(map[string][]config.Plugin) + + for _, e := range f.getExtensionPoints(&config.Plugins{}) { + plugins := reflect.ValueOf(e.slicePtr).Elem() + extName := plugins.Type().Elem().Name() + var cfgs []config.Plugin + for i := 0; i < plugins.Len(); i++ { + name := plugins.Index(i).Interface().(Plugin).Name() + p := config.Plugin{Name: name} + if extName == "ScorePlugin" { + // Weights apply only to score plugins. + p.Weight = int32(f.pluginNameToWeightMap[name]) + } + cfgs = append(cfgs, p) + } + if len(cfgs) > 0 { + m[extName] = cfgs + } + } + if len(m) > 0 { + return m + } + return nil +} + +// ClientSet returns a kubernetes clientset. +func (f *framework) ClientSet() clientset.Interface { + return f.clientSet +} + +// SharedInformerFactory returns a shared informer factory. 
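// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a sketch of how a plugin factory
// could use the handle accessors (ClientSet, SharedInformerFactory) that this
// change wires into the framework. The factory shape below — taking a
// *runtime.Unknown config and a FrameworkHandle — matches how NewFramework
// invokes factories in this diff, but the Registry/PluginFactory types live
// in registry.go, which is not shown here, so treat the exact signature as an
// assumption. The plugin itself is hypothetical.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"k8s.io/apimachinery/pkg/runtime"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type handleAwarePlugin struct {
	handle framework.FrameworkHandle
}

func (pl *handleAwarePlugin) Name() string { return "HandleAware" }

// NewHandleAwarePlugin keeps the handle so later extension points can reach
// the client set, the shared informer factory, or the NodeInfo snapshot.
func NewHandleAwarePlugin(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
	_ = h.ClientSet()             // kube client for API calls
	_ = h.SharedInformerFactory() // informers for watching cluster objects
	return &handleAwarePlugin{handle: h}, nil
}
// ---------------------------------------------------------------------------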
+func (f *framework) SharedInformerFactory() informers.SharedInformerFactory { + return f.informerFactory +} + +func (f *framework) pluginsNeeded(plugins *config.Plugins) map[string]config.Plugin { + pgMap := make(map[string]config.Plugin) if plugins == nil { return pgMap @@ -603,17 +878,8 @@ func pluginsNeeded(plugins *config.Plugins) map[string]config.Plugin { pgMap[pg.Name] = pg } } - find(plugins.QueueSort) - find(plugins.PreFilter) - find(plugins.Filter) - find(plugins.PostFilter) - find(plugins.Score) - find(plugins.Reserve) - find(plugins.Permit) - find(plugins.PreBind) - find(plugins.Bind) - find(plugins.PostBind) - find(plugins.Unreserve) - + for _, e := range f.getExtensionPoints(plugins) { + find(e.plugins) + } return pgMap } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go index 246bfdb34f4..da7309d5f58 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go @@ -19,25 +19,27 @@ limitations under the License. package v1alpha1 import ( + "context" "errors" + "math" "time" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/scheduler/apis/config" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) -// Code is the Status code/type which is returned from plugins. -type Code int - // NodeScoreList declares a list of nodes and their scores. type NodeScoreList []NodeScore // NodeScore is a struct with node name and score. type NodeScore struct { Name string - Score int + Score int64 } // PluginToNodeScores declares a map from plugin name to its NodeScoreList. @@ -46,6 +48,9 @@ type PluginToNodeScores map[string]NodeScoreList // NodeToStatusMap declares map from node name to its status. type NodeToStatusMap map[string]*Status +// Code is the Status code/type which is returned from plugins. +type Code int + // These are predefined codes used in a Status. const ( // Success means that plugin ran correctly and found pod schedulable. @@ -69,12 +74,22 @@ const ( Skip ) +// This list should be exactly the same as the codes iota defined above in the same order. +var codes = []string{"Success", "Error", "Unschedulable", "UnschedulableAndUnresolvable", "Wait", "Skip"} + +func (c Code) String() string { + return codes[c] +} + const ( // MaxNodeScore is the maximum score a Score plugin is expected to return. - MaxNodeScore int = schedulerapi.MaxPriority + MaxNodeScore int64 = 100 // MinNodeScore is the minimum score a Score plugin is expected to return. - MinNodeScore int = 0 + MinNodeScore int64 = 0 + + // MaxTotalScore is the maximum total score. + MaxTotalScore int64 = math.MaxInt64 ) // Status indicates the result of running a plugin. It consists of a code and a @@ -133,10 +148,14 @@ func NewStatus(code Code, msg string) *Status { type WaitingPod interface { // GetPod returns a reference to the waiting pod. GetPod() *v1.Pod - // Allow the waiting pod to be scheduled. Returns true if the allow signal was - // successfully delivered, false otherwise. - Allow() bool - // Reject declares the waiting pod unschedulable. 
Returns true if the allow signal + // GetPendingPlugins returns a list of pending permit plugin's name. + GetPendingPlugins() []string + // Allow declares the waiting pod is allowed to be scheduled by plugin pluginName. + // If this is the last remaining plugin to allow, then a success signal is delivered + // to unblock the pod. + // Returns true if the allow signal was successfully dealt with, false otherwise. + Allow(pluginName string) bool + // Reject declares the waiting pod unschedulable. Returns true if the reject signal // was successfully delivered, false otherwise. Reject(msg string) bool } @@ -146,11 +165,30 @@ type Plugin interface { Name() string } -// PodInfo is minimum cell in the scheduling queue. +// PodInfo is a wrapper to a Pod with additional information for purposes such as tracking +// the timestamp when it's added to the queue or recording per-pod metrics. type PodInfo struct { Pod *v1.Pod // The time pod added to the scheduling queue. Timestamp time.Time + // Number of schedule attempts before successfully scheduled. + // It's used to record the # attempts metric. + Attempts int + // The time when the pod is added to the queue for the first time. The pod may be added + // back to the queue multiple times before it's successfully scheduled. + // It shouldn't be updated once initialized. It's used to record the e2e scheduling + // latency for a pod. + InitialAttemptTimestamp time.Time +} + +// DeepCopy returns a deep copy of the PodInfo object. +func (podInfo *PodInfo) DeepCopy() *PodInfo { + return &PodInfo{ + Pod: podInfo.Pod.DeepCopy(), + Timestamp: podInfo.Timestamp, + Attempts: podInfo.Attempts, + InitialAttemptTimestamp: podInfo.InitialAttemptTimestamp, + } } // LessFunc is the function to sort pod info @@ -165,13 +203,32 @@ type QueueSortPlugin interface { Less(*PodInfo, *PodInfo) bool } +// PreFilterExtensions is an interface that is included in plugins that allow specifying +// callbacks to make incremental updates to its supposedly pre-calculated +// state. +type PreFilterExtensions interface { + // AddPod is called by the framework while trying to evaluate the impact + // of adding podToAdd to the node while scheduling podToSchedule. + AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status + // RemovePod is called by the framework while trying to evaluate the impact + // of removing podToRemove from the node while scheduling podToSchedule. + RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status +} + // PreFilterPlugin is an interface that must be implemented by "prefilter" plugins. // These plugins are called at the beginning of the scheduling cycle. type PreFilterPlugin interface { Plugin // PreFilter is called at the beginning of the scheduling cycle. All PreFilter // plugins must return success or the pod will be rejected. - PreFilter(pc *PluginContext, p *v1.Pod) *Status + PreFilter(ctx context.Context, state *CycleState, p *v1.Pod) *Status + // PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one, + // or nil if it does not. A Pre-filter plugin can provide extensions to incrementally + // modify its pre-processed info. The framework guarantees that the extensions + // AddPod/RemovePod will only be called after PreFilter, possibly on a cloned + // CycleState, and may call those functions more than once before calling + // Filter again on a specific node. 
+ PreFilterExtensions() PreFilterExtensions } // FilterPlugin is an interface for Filter plugins. These plugins are called at the @@ -188,7 +245,14 @@ type FilterPlugin interface { // the given node fits the pod. If Filter doesn't return "Success", // please refer scheduler/algorithm/predicates/error.go // to set error message. - Filter(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + // For the node being evaluated, Filter plugins should look at the passed + // nodeInfo reference for this particular node's information (e.g., pods + // considered to be running on the node) instead of looking it up in the + // NodeInfoSnapshot because we don't guarantee that they will be the same. + // For example, during preemption, we may pass a copy of the original + // nodeInfo object that has some pods removed from it to evaluate the + // possibility of preempting them to schedule the target pod. + Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status } // PostFilterPlugin is an interface for Post-filter plugin. Post-filter is an @@ -201,7 +265,15 @@ type PostFilterPlugin interface { // passed the filtering phase. All postfilter plugins must return success or // the pod will be rejected. The filteredNodesStatuses is the set of filtered nodes // and their filter status. - PostFilter(pc *PluginContext, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status + PostFilter(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status +} + +// ScoreExtensions is an interface for Score extended functionality. +type ScoreExtensions interface { + // NormalizeScore is called for all node scores produced by the same plugin's "Score" + // method. A successful run of NormalizeScore will update the scores list and return + // a success status. + NormalizeScore(ctx context.Context, state *CycleState, p *v1.Pod, scores NodeScoreList) *Status } // ScorePlugin is an interface that must be implemented by "score" plugins to rank @@ -211,18 +283,10 @@ type ScorePlugin interface { // Score is called on each filtered node. It must return success and an integer // indicating the rank of the node. All scoring plugins must return success or // the pod will be rejected. - Score(pc *PluginContext, p *v1.Pod, nodeName string) (int, *Status) -} + Score(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) (int64, *Status) -// ScoreWithNormalizePlugin is an interface that must be implemented by "score" -// plugins that also need to normalize the node scoring results produced by the same -// plugin's "Score" method. -type ScoreWithNormalizePlugin interface { - ScorePlugin - // NormalizeScore is called for all node scores produced by the same plugin's "Score" - // method. A successful run of NormalizeScore will update the scores list and return - // a success status. - NormalizeScore(pc *PluginContext, p *v1.Pod, scores NodeScoreList) *Status + // ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if does not. + ScoreExtensions() ScoreExtensions } // ReservePlugin is an interface for Reserve plugins. These plugins are called @@ -235,7 +299,7 @@ type ReservePlugin interface { Plugin // Reserve is called by the scheduling framework when the scheduler cache is // updated. 
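// ---------------------------------------------------------------------------
// Editorial aside, not part of this patch: a minimal ScorePlugin under the
// reworked interfaces above, where normalization now hangs off
// ScoreExtensions() instead of a separate ScoreWithNormalizePlugin type. The
// plugin (sampleScore) and its scoring rule are hypothetical.
// ---------------------------------------------------------------------------
package exampleplugin

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type sampleScore struct{}

var _ framework.ScorePlugin = &sampleScore{}

func (s *sampleScore) Name() string { return "SampleScore" }

// Score returns a raw score; here simply the number of containers in the pod.
func (s *sampleScore) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
	return int64(len(p.Spec.Containers)), nil
}

// ScoreExtensions exposes NormalizeScore; returning s tells the framework to
// call NormalizeScore on this plugin's raw scores.
func (s *sampleScore) ScoreExtensions() framework.ScoreExtensions {
	return s
}

// NormalizeScore rescales raw scores into [MinNodeScore, MaxNodeScore].
func (s *sampleScore) NormalizeScore(ctx context.Context, state *framework.CycleState, p *v1.Pod, scores framework.NodeScoreList) *framework.Status {
	var max int64
	for _, ns := range scores {
		if ns.Score > max {
			max = ns.Score
		}
	}
	if max == 0 {
		return nil
	}
	for i := range scores {
		scores[i].Score = scores[i].Score * framework.MaxNodeScore / max
	}
	return nil
}
// ---------------------------------------------------------------------------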
- Reserve(pc *PluginContext, p *v1.Pod, nodeName string) *Status + Reserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status } // PreBindPlugin is an interface that must be implemented by "prebind" plugins. @@ -244,7 +308,7 @@ type PreBindPlugin interface { Plugin // PreBind is called before binding a pod. All prebind plugins must return // success or the pod will be rejected and won't be sent for binding. - PreBind(pc *PluginContext, p *v1.Pod, nodeName string) *Status + PreBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status } // PostBindPlugin is an interface that must be implemented by "postbind" plugins. @@ -255,7 +319,7 @@ type PostBindPlugin interface { // informational. A common application of this extension point is for cleaning // up. If a plugin needs to clean-up its state after a pod is scheduled and // bound, PostBind is the extension point that it should register. - PostBind(pc *PluginContext, p *v1.Pod, nodeName string) + PostBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) } // UnreservePlugin is an interface for Unreserve plugins. This is an informational @@ -266,7 +330,7 @@ type UnreservePlugin interface { Plugin // Unreserve is called by the scheduling framework when a reserved pod was // rejected in a later phase. - Unreserve(pc *PluginContext, p *v1.Pod, nodeName string) + Unreserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) } // PermitPlugin is an interface that must be implemented by "permit" plugins. @@ -279,7 +343,7 @@ type PermitPlugin interface { // The pod will also be rejected if the wait timeout or the pod is rejected while // waiting. Note that if the plugin returns "wait", the framework will wait only // after running the remaining plugins given that no other plugin rejects the pod. - Permit(pc *PluginContext, p *v1.Pod, nodeName string) (*Status, time.Duration) + Permit(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) (*Status, time.Duration) } // BindPlugin is an interface that must be implemented by "bind" plugins. Bind @@ -292,7 +356,7 @@ type BindPlugin interface { // remaining bind plugins are skipped. When a bind plugin does not handle a pod, // it must return Skip in its Status code. If a bind plugin returns an Error, the // pod is rejected and will not be bound. - Bind(pc *PluginContext, p *v1.Pod, nodeName string) *Status + Bind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status } // Framework manages the set of plugins in use by the scheduling framework. @@ -306,41 +370,57 @@ type Framework interface { // *Status and its code is set to non-success if any of the plugins returns // anything but Success. If a non-success status is returned, then the scheduling // cycle is aborted. - RunPreFilterPlugins(pc *PluginContext, pod *v1.Pod) *Status - - // RunFilterPlugins runs the set of configured filter plugins for pod on the - // given host. If any of these plugins returns any status other than "Success", - // the given node is not suitable for running the pod. - RunFilterPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + RunPreFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod) *Status + + // RunFilterPlugins runs the set of configured filter plugins for pod on + // the given node. It returns directly if any of the filter plugins + // return any status other than "Success". 
Note that for the node being + // evaluated, the passed nodeInfo reference could be different from the + // one in NodeInfoSnapshot map (e.g., pods considered to be running on + // the node could be different). For example, during preemption, we may + // pass a copy of the original nodeInfo object that has some pods + // removed from it to evaluate the possibility of preempting them to + // schedule the target pod. + RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status + + // RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured + // PreFilter plugins. It returns directly if any of the plugins return any + // status other than Success. + RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status + + // RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured + // PreFilter plugins. It returns directly if any of the plugins return any + // status other than Success. + RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status // RunPostFilterPlugins runs the set of configured post-filter plugins. If any // of these plugins returns any status other than "Success", the given node is // rejected. The filteredNodeStatuses is the set of filtered nodes and their statuses. - RunPostFilterPlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status + RunPostFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status // RunScorePlugins runs the set of configured scoring plugins. It returns a map that // stores for each scoring plugin name the corresponding NodeScoreList(s). // It also returns *Status, which is set to non-success if any of the plugins returns // a non-success status. - RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1.Node) (PluginToNodeScores, *Status) + RunScorePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node) (PluginToNodeScores, *Status) // RunPreBindPlugins runs the set of configured prebind plugins. It returns // *Status and its code is set to non-success if any of the plugins returns // anything but Success. If the Status code is "Unschedulable", it is // considered as a scheduling check failure, otherwise, it is considered as an // internal error. In either case the pod is not going to be bound. - RunPreBindPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + RunPreBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status // RunPostBindPlugins runs the set of configured postbind plugins. - RunPostBindPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) + RunPostBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) // RunReservePlugins runs the set of configured reserve plugins. If any of these // plugins returns an error, it does not continue running the remaining ones and // returns the error. In such case, pod will not be scheduled. - RunReservePlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + RunReservePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status // RunUnreservePlugins runs the set of configured unreserve plugins. 
- RunUnreservePlugins(pc *PluginContext, pod *v1.Pod, nodeName string) + RunUnreservePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) // RunPermitPlugins runs the set of configured permit plugins. If any of these // plugins returns a status other than "Success" or "Wait", it does not continue @@ -349,32 +429,49 @@ type Framework interface { // returned by the plugin, if the time expires, then it will return an error. // Note that if multiple plugins asked to wait, then we wait for the minimum // timeout duration. - RunPermitPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + RunPermitPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status // RunBindPlugins runs the set of configured bind plugins. A bind plugin may choose // whether or not to handle the given Pod. If a bind plugin chooses to skip the // binding, it should return code=4("skip") status. Otherwise, it should return "Error" // or "Success". If none of the plugins handled binding, RunBindPlugins returns // code=4("skip") status. - RunBindPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + RunBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status + + // HasFilterPlugins returns true if at least one filter plugin is defined. + HasFilterPlugins() bool + + // HasScorePlugins returns true if at least one score plugin is defined. + HasScorePlugins() bool + + // ListPlugins returns a map of extension point name to list of configured Plugins. + ListPlugins() map[string][]config.Plugin } // FrameworkHandle provides data and some tools that plugins can use. It is // passed to the plugin factories at the time of plugin initialization. Plugins // must store and use this handle to call framework functions. type FrameworkHandle interface { - // NodeInfoSnapshot return the latest NodeInfo snapshot. The snapshot + // SnapshotSharedLister returns listers from the latest NodeInfo Snapshot. The snapshot // is taken at the beginning of a scheduling cycle and remains unchanged until // a pod finishes "Reserve" point. There is no guarantee that the information // remains unchanged in the binding phase of scheduling, so plugins in the binding // cycle(permit/pre-bind/bind/post-bind/un-reserve plugin) should not use it, // otherwise a concurrent read/write error might occur, they should use scheduler // cache instead. - NodeInfoSnapshot() *schedulernodeinfo.Snapshot + SnapshotSharedLister() schedulerlisters.SharedLister // IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. IterateOverWaitingPods(callback func(WaitingPod)) // GetWaitingPod returns a waiting pod given its UID. GetWaitingPod(uid types.UID) WaitingPod + + // RejectWaitingPod rejects a waiting pod given its UID. + RejectWaitingPod(uid types.UID) + + // ClientSet returns a kubernetes clientSet. + ClientSet() clientset.Interface + + SharedInformerFactory() informers.SharedInformerFactory } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/metrics_recorder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/metrics_recorder.go new file mode 100644 index 00000000000..f6ae15f97ff --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/metrics_recorder.go @@ -0,0 +1,115 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/kubernetes/pkg/scheduler/metrics" +) + +// frameworkMetric is the data structure passed in the buffer channel between the main framework thread +// and the metricsRecorder goroutine. +type frameworkMetric struct { + metric *k8smetrics.HistogramVec + labelValues []string + value float64 +} + +// metricRecorder records framework metrics in a separate goroutine to avoid overhead in the critical path. +type metricsRecorder struct { + // bufferCh is a channel that serves as a metrics buffer before the metricsRecorder goroutine reports it. + bufferCh chan *frameworkMetric + // if bufferSize is reached, incoming metrics will be discarded. + bufferSize int + // how often the recorder runs to flush the metrics. + interval time.Duration + + // stopCh is used to stop the goroutine which periodically flushes metrics. It's currently only + // used in tests. + stopCh chan struct{} + // isStoppedCh indicates whether the goroutine is stopped. It's used in tests only to make sure + // the metric flushing goroutine is stopped so that tests can collect metrics for verification. + isStoppedCh chan struct{} +} + +func newMetricsRecorder(bufferSize int, interval time.Duration) *metricsRecorder { + recorder := &metricsRecorder{ + bufferCh: make(chan *frameworkMetric, bufferSize), + bufferSize: bufferSize, + interval: interval, + stopCh: make(chan struct{}), + isStoppedCh: make(chan struct{}), + } + go recorder.run() + return recorder +} + +// observeExtensionPointDurationAsync observes the framework_extension_point_duration_seconds metric. +// The metric will be flushed to Prometheus asynchronously. +func (r *metricsRecorder) observeExtensionPointDurationAsync(extensionPoint string, status *Status, value float64) { + newMetric := &frameworkMetric{ + metric: metrics.FrameworkExtensionPointDuration, + labelValues: []string{extensionPoint, status.Code().String()}, + value: value, + } + select { + case r.bufferCh <- newMetric: + default: + } +} + +// observeExtensionPointDurationAsync observes the plugin_execution_duration_seconds metric. +// The metric will be flushed to Prometheus asynchronously. +func (r *metricsRecorder) observePluginDurationAsync(extensionPoint, pluginName string, status *Status, value float64) { + newMetric := &frameworkMetric{ + metric: metrics.PluginExecutionDuration, + labelValues: []string{pluginName, extensionPoint, status.Code().String()}, + value: value, + } + select { + case r.bufferCh <- newMetric: + default: + } +} + +// run flushes buffered metrics into Prometheus every second. +func (r *metricsRecorder) run() { + for { + select { + case <-r.stopCh: + close(r.isStoppedCh) + return + default: + } + r.flushMetrics() + time.Sleep(r.interval) + } +} + +// flushMetrics tries to clean up the bufferCh by reading at most bufferSize metrics. 
+func (r *metricsRecorder) flushMetrics() { + for i := 0; i < r.bufferSize; i++ { + select { + case m := <-r.bufferCh: + m.metric.WithLabelValues(m.labelValues...).Observe(m.value) + default: + return + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/registry.go index c25c3baf23d..f2398da8a0a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/registry.go @@ -69,18 +69,12 @@ func (r Registry) Unregister(name string) error { return nil } -// NewRegistry builds a default registry with all the default plugins. -// This is the registry that Kubernetes default scheduler uses. A scheduler that -// runs custom plugins, can pass a different Registry and when initializing the -// scheduler. -func NewRegistry() Registry { - return Registry{ - // FactoryMap: - // New plugins are registered here. - // example: - // { - // stateful_plugin.Name: stateful.NewStatefulMultipointExample, - // fooplugin.Name: fooplugin.New, - // } +// Merge merges the provided registry to the current one. +func (r Registry) Merge(in Registry) error { + for name, factory := range in { + if err := r.Register(name, factory); err != nil { + return err + } } + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go index 842eff5e538..df7dbe6eeea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go @@ -17,7 +17,9 @@ limitations under the License. package v1alpha1 import ( + "fmt" "sync" + "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -69,16 +71,34 @@ func (m *waitingPodsMap) iterate(callback func(WaitingPod)) { // waitingPod represents a pod waiting in the permit phase. type waitingPod struct { - pod *v1.Pod - s chan *Status + pod *v1.Pod + pendingPlugins map[string]*time.Timer + s chan *Status + mu sync.RWMutex } // newWaitingPod returns a new waitingPod instance. -func newWaitingPod(pod *v1.Pod) *waitingPod { - return &waitingPod{ +func newWaitingPod(pod *v1.Pod, pluginsMaxWaitTime map[string]time.Duration) *waitingPod { + wp := &waitingPod{ pod: pod, s: make(chan *Status), } + + wp.pendingPlugins = make(map[string]*time.Timer, len(pluginsMaxWaitTime)) + // The time.AfterFunc calls wp.Reject which iterates through pendingPlugins map. Acquire the + // lock here so that time.AfterFunc can only execute after newWaitingPod finishes. + wp.mu.Lock() + defer wp.mu.Unlock() + for k, v := range pluginsMaxWaitTime { + plugin, waitTime := k, v + wp.pendingPlugins[plugin] = time.AfterFunc(waitTime, func() { + msg := fmt.Sprintf("rejected due to timeout after waiting %v at plugin %v", + waitTime, plugin) + wp.Reject(msg) + }) + } + + return wp } // GetPod returns a reference to the waiting pod. @@ -86,9 +106,35 @@ func (w *waitingPod) GetPod() *v1.Pod { return w.pod } -// Allow the waiting pod to be scheduled. Returns true if the allow signal was -// successfully delivered, false otherwise. -func (w *waitingPod) Allow() bool { +// GetPendingPlugins returns a list of pending permit plugin's name. 
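Illustrative aside on the new metrics_recorder.go above (not part of the vendored patch): the non-blocking hand-off is a generic Go pattern, a bounded channel written with select/default so the scheduling hot path drops a sample instead of blocking when the buffer is full. A self-contained sketch of just that pattern, with invented types:

package main

import "fmt"

// sample stands in for the frameworkMetric struct above.
type sample struct {
	name  string
	value float64
}

type recorder struct{ bufferCh chan *sample }

// observeAsync never blocks: when the buffer is full the sample is dropped,
// mirroring the select/default in observeExtensionPointDurationAsync.
func (r *recorder) observeAsync(s *sample) {
	select {
	case r.bufferCh <- s:
	default: // buffer full; drop the sample rather than stall the caller
	}
}

// flush drains whatever is currently buffered, like flushMetrics does.
func (r *recorder) flush() {
	for {
		select {
		case s := <-r.bufferCh:
			fmt.Printf("observed %s=%v\n", s.name, s.value)
		default:
			return
		}
	}
}

func main() {
	r := &recorder{bufferCh: make(chan *sample, 4)}
	for i := 0; i < 10; i++ { // only the first 4 fit; the rest are silently dropped
		r.observeAsync(&sample{name: "plugin_execution_duration_seconds", value: float64(i)})
	}
	r.flush()
}
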
+func (w *waitingPod) GetPendingPlugins() []string { + w.mu.RLock() + defer w.mu.RUnlock() + plugins := make([]string, 0, len(w.pendingPlugins)) + for p := range w.pendingPlugins { + plugins = append(plugins, p) + } + + return plugins +} + +// Allow declares the waiting pod is allowed to be scheduled by plugin pluginName. +// If this is the last remaining plugin to allow, then a success signal is delivered +// to unblock the pod. +// Returns true if the allow signal was successfully dealt with, false otherwise. +func (w *waitingPod) Allow(pluginName string) bool { + w.mu.Lock() + defer w.mu.Unlock() + if timer, exist := w.pendingPlugins[pluginName]; exist { + timer.Stop() + delete(w.pendingPlugins, pluginName) + } + + // Only signal success status after all plugins have allowed + if len(w.pendingPlugins) != 0 { + return true + } + select { case w.s <- NewStatus(Success, ""): return true @@ -97,9 +143,15 @@ func (w *waitingPod) Allow() bool { } } -// Reject declares the waiting pod unschedulable. Returns true if the allow signal +// Reject declares the waiting pod unschedulable. Returns true if the reject signal // was successfully delivered, false otherwise. func (w *waitingPod) Reject(msg string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + for _, timer := range w.pendingPlugins { + timer.Stop() + } + select { case w.s <- NewStatus(Unschedulable, msg): return true diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD index 7e002c10d26..67e0dad1bfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD @@ -8,14 +8,15 @@ go_library( "node_tree.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache", - visibility = ["//visibility:public"], + visibility = ["//pkg/scheduler:__subpackages__"], deps = [ "//pkg/features:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/listers:go_default_library", + "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -35,10 +36,10 @@ go_test( "//pkg/features:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go index 
9a25cb4daf8..d740c0625d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go @@ -22,16 +22,16 @@ import ( "time" v1 "k8s.io/api/core/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" + "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - - "k8s.io/klog" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) var ( @@ -70,11 +70,10 @@ type schedulerCache struct { // a map from pod key to podState. podStates map[string]*podState nodes map[string]*nodeInfoListItem - csiNodes map[string]*storagev1beta1.CSINode // headNode points to the most recently updated NodeInfo in "nodes". It is the // head of the linked list. headNode *nodeInfoListItem - nodeTree *NodeTree + nodeTree *nodeTree // A map from image name to its imageState. imageStates map[string]*imageState } @@ -110,7 +109,6 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul nodes: make(map[string]*nodeInfoListItem), nodeTree: newNodeTree(nil), - csiNodes: make(map[string]*storagev1beta1.CSINode), assumedPods: make(map[string]bool), podStates: make(map[string]*podState), imageStates: make(map[string]*imageState), @@ -203,12 +201,12 @@ func (cache *schedulerCache) Snapshot() *Snapshot { // beginning of every scheduling cycle. // This function tracks generation number of NodeInfo and updates only the // entries of an existing snapshot that have changed after the snapshot was taken. -func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *schedulernodeinfo.Snapshot) error { +func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error { cache.mu.Lock() defer cache.mu.Unlock() balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) - // Get the last generation of the the snapshot. + // Get the last generation of the snapshot. 
snapshotGeneration := nodeSnapshot.Generation // Start from the head of the NodeInfo doubly linked list and update snapshot @@ -238,6 +236,21 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *schedulernodei } } } + + // Take a snapshot of the nodes order in the tree + nodeSnapshot.NodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) + nodeSnapshot.HavePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) + for i := 0; i < cache.nodeTree.numNodes; i++ { + nodeName := cache.nodeTree.next() + if n := nodeSnapshot.NodeInfoMap[nodeName]; n != nil { + nodeSnapshot.NodeInfoList = append(nodeSnapshot.NodeInfoList, n) + if len(n.PodsWithAffinity()) > 0 { + nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n) + } + } else { + klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName) + } + } return nil } @@ -246,7 +259,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { return cache.FilteredList(alwaysTrue, selector) } -func (cache *schedulerCache) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { +func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { cache.mu.RLock() defer cache.mu.RUnlock() // podFilter is expected to return true for most or all of the pods. We @@ -516,7 +529,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error { } cache.moveNodeInfoToHead(node.Name) - cache.nodeTree.AddNode(node) + cache.nodeTree.addNode(node) cache.addNodeImageStates(node, n.info) return n.info.SetNode(node) } @@ -534,7 +547,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { } cache.moveNodeInfoToHead(newNode.Name) - cache.nodeTree.UpdateNode(oldNode, newNode) + cache.nodeTree.updateNode(oldNode, newNode) cache.addNodeImageStates(newNode, n.info) return n.info.SetNode(newNode) } @@ -560,41 +573,13 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { cache.moveNodeInfoToHead(node.Name) } - if err := cache.nodeTree.RemoveNode(node); err != nil { + if err := cache.nodeTree.removeNode(node); err != nil { return err } cache.removeNodeImageStates(node) return nil } -func (cache *schedulerCache) AddCSINode(csiNode *storagev1beta1.CSINode) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - cache.csiNodes[csiNode.Name] = csiNode - return nil -} - -func (cache *schedulerCache) UpdateCSINode(oldCSINode, newCSINode *storagev1beta1.CSINode) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - cache.csiNodes[newCSINode.Name] = newCSINode - return nil -} - -func (cache *schedulerCache) RemoveCSINode(csiNode *storagev1beta1.CSINode) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - _, ok := cache.csiNodes[csiNode.Name] - if !ok { - return fmt.Errorf("csinode %v is not found", csiNode.Name) - } - delete(cache.csiNodes, csiNode.Name) - return nil -} - // addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in // scheduler cache. This function assumes the lock to scheduler cache has been acquired. func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) { @@ -655,9 +640,11 @@ func (cache *schedulerCache) cleanupExpiredAssumedPods() { } // cleanupAssumedPods exists for making test deterministic by taking time as input argument. 
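Illustrative sketch of the incremental-snapshot idea used by UpdateNodeInfoSnapshot above (not the vendored code): the cache keeps NodeInfos in a doubly linked list with the most recently updated entry at the head, and a snapshot only re-copies entries whose generation is newer than the generation recorded when the previous snapshot was taken. The types below are invented simplifications.

package main

import "fmt"

// item is a stand-in for a nodeInfoListItem: a linked-list node whose payload
// carries a monotonically increasing generation, ordered newest first.
type item struct {
	name       string
	generation int64
	next       *item
}

type snapshot struct {
	generation int64
	data       map[string]int64
}

// update copies only entries that changed since the snapshot was last taken:
// walk from the most recently updated head and stop at the first entry that is
// not newer than the snapshot generation.
func (s *snapshot) update(head *item) {
	for n := head; n != nil; n = n.next {
		if n.generation <= s.generation {
			break // everything past this point is already in the snapshot
		}
		s.data[n.name] = n.generation
	}
	if head != nil {
		s.generation = head.generation
	}
}

func main() {
	// node-c (gen 5) -> node-b (gen 4) -> node-a (gen 1)
	a := &item{name: "node-a", generation: 1}
	b := &item{name: "node-b", generation: 4, next: a}
	c := &item{name: "node-c", generation: 5, next: b}

	s := &snapshot{generation: 3, data: map[string]int64{"node-a": 1}}
	s.update(c)
	fmt.Println(s.data, s.generation) // only node-b and node-c were re-copied
}
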
+// It also reports metrics on the cache size for nodes, pods, and assumed pods. func (cache *schedulerCache) cleanupAssumedPods(now time.Time) { cache.mu.Lock() defer cache.mu.Unlock() + defer cache.updateMetrics() // The size of assumedPods should be small for key := range cache.assumedPods { @@ -688,10 +675,6 @@ func (cache *schedulerCache) expirePod(key string, ps *podState) error { return nil } -func (cache *schedulerCache) NodeTree() *NodeTree { - return cache.nodeTree -} - // GetNodeInfo returns cached data for the node name. func (cache *schedulerCache) GetNodeInfo(nodeName string) (*v1.Node, error) { cache.mu.RLock() @@ -705,30 +688,9 @@ func (cache *schedulerCache) GetNodeInfo(nodeName string) (*v1.Node, error) { return n.info.Node(), nil } -// ListNodes returns the cached list of nodes. -func (cache *schedulerCache) ListNodes() []*v1.Node { - cache.mu.RLock() - defer cache.mu.RUnlock() - - nodes := make([]*v1.Node, 0, len(cache.nodes)) - for _, node := range cache.nodes { - // Node info is sometimes not removed immediately. See schedulerCache.RemoveNode. - n := node.info.Node() - if n != nil { - nodes = append(nodes, n) - } - } - return nodes -} - -func (cache *schedulerCache) GetCSINodeInfo(nodeName string) (*storagev1beta1.CSINode, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - - n, ok := cache.csiNodes[nodeName] - if !ok { - return nil, fmt.Errorf("error retrieving csinode '%v' from cache", nodeName) - } - - return n, nil +// updateMetrics updates cache size metric values for pods, assumed pods, and nodes +func (cache *schedulerCache) updateMetrics() { + metrics.CacheSize.WithLabelValues("assumed_pods").Set(float64(len(cache.assumedPods))) + metrics.CacheSize.WithLabelValues("pods").Set(float64(len(cache.podStates))) + metrics.CacheSize.WithLabelValues("nodes").Set(float64(len(cache.nodes))) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go index 00f711d4869..a4c47cc0cc3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go @@ -18,9 +18,9 @@ package cache import ( v1 "k8s.io/api/core/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) // Cache collects pods' information and provides node-level aggregated information. @@ -58,8 +58,7 @@ import ( // - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, // a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. type Cache interface { - algorithm.PodLister - algorithm.NodeLister + schedulerlisters.PodLister // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). @@ -101,28 +100,10 @@ type Cache interface { // UpdateNodeInfoSnapshot updates the passed infoSnapshot to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) // on this node. 
- UpdateNodeInfoSnapshot(nodeSnapshot *schedulernodeinfo.Snapshot) error - - // AddCSINode adds overall CSI-related information about node. - AddCSINode(csiNode *storagev1beta1.CSINode) error - - // UpdateCSINode updates overall CSI-related information about node. - UpdateCSINode(oldCSINode, newCSINode *storagev1beta1.CSINode) error - - // RemoveCSINode removes overall CSI-related information about node. - RemoveCSINode(csiNode *storagev1beta1.CSINode) error - - // GetNodeInfo returns the node object with node string. - GetNodeInfo(nodeName string) (*v1.Node, error) - - // GetCSINodeInfo returns the csinode object with the given name. - GetCSINodeInfo(nodeName string) (*storagev1beta1.CSINode, error) + UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error // Snapshot takes a snapshot on current cache Snapshot() *Snapshot - - // NodeTree returns a node tree structure - NodeTree() *NodeTree } // Snapshot is a snapshot of cache state diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go index de53feef487..4a81182f7eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go @@ -18,22 +18,21 @@ package cache import ( "fmt" - "sync" "k8s.io/api/core/v1" - utilnode "k8s.io/kubernetes/pkg/util/node" - "k8s.io/klog" + utilnode "k8s.io/kubernetes/pkg/util/node" ) -// NodeTree is a tree-like data structure that holds node names in each zone. Zone names are +// nodeTree is a tree-like data structure that holds node names in each zone. Zone names are // keys to "NodeTree.tree" and values of "NodeTree.tree" are arrays of node names. -type NodeTree struct { +// NodeTree is NOT thread-safe, any concurrent updates/reads from it must be synchronized by the caller. +// It is used only by schedulerCache, and should stay as such. +type nodeTree struct { tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone. zones []string // a list of all the zones in the tree (keys) zoneIndex int numNodes int - mu sync.RWMutex } // nodeArray is a struct that has nodes that are in a zone. @@ -58,8 +57,8 @@ func (na *nodeArray) next() (nodeName string, exhausted bool) { } // newNodeTree creates a NodeTree from nodes. -func newNodeTree(nodes []*v1.Node) *NodeTree { - nt := &NodeTree{ +func newNodeTree(nodes []*v1.Node) *nodeTree { + nt := &nodeTree{ tree: make(map[string]*nodeArray), } for _, n := range nodes { @@ -68,15 +67,9 @@ func newNodeTree(nodes []*v1.Node) *NodeTree { return nt } -// AddNode adds a node and its corresponding zone to the tree. If the zone already exists, the node +// addNode adds a node and its corresponding zone to the tree. If the zone already exists, the node // is added to the array of nodes in that zone. -func (nt *NodeTree) AddNode(n *v1.Node) { - nt.mu.Lock() - defer nt.mu.Unlock() - nt.addNode(n) -} - -func (nt *NodeTree) addNode(n *v1.Node) { +func (nt *nodeTree) addNode(n *v1.Node) { zone := utilnode.GetZoneKey(n) if na, ok := nt.tree[zone]; ok { for _, nodeName := range na.nodes { @@ -94,14 +87,8 @@ func (nt *NodeTree) addNode(n *v1.Node) { nt.numNodes++ } -// RemoveNode removes a node from the NodeTree. 
-func (nt *NodeTree) RemoveNode(n *v1.Node) error { - nt.mu.Lock() - defer nt.mu.Unlock() - return nt.removeNode(n) -} - -func (nt *NodeTree) removeNode(n *v1.Node) error { +// removeNode removes a node from the NodeTree. +func (nt *nodeTree) removeNode(n *v1.Node) error { zone := utilnode.GetZoneKey(n) if na, ok := nt.tree[zone]; ok { for i, nodeName := range na.nodes { @@ -122,7 +109,7 @@ func (nt *NodeTree) removeNode(n *v1.Node) error { // removeZone removes a zone from tree. // This function must be called while writer locks are hold. -func (nt *NodeTree) removeZone(zone string) { +func (nt *nodeTree) removeZone(zone string) { delete(nt.tree, zone) for i, z := range nt.zones { if z == zone { @@ -132,8 +119,8 @@ func (nt *NodeTree) removeZone(zone string) { } } -// UpdateNode updates a node in the NodeTree. -func (nt *NodeTree) UpdateNode(old, new *v1.Node) { +// updateNode updates a node in the NodeTree. +func (nt *nodeTree) updateNode(old, new *v1.Node) { var oldZone string if old != nil { oldZone = utilnode.GetZoneKey(old) @@ -144,24 +131,20 @@ func (nt *NodeTree) UpdateNode(old, new *v1.Node) { if oldZone == newZone { return } - nt.mu.Lock() - defer nt.mu.Unlock() nt.removeNode(old) // No error checking. We ignore whether the old node exists or not. nt.addNode(new) } -func (nt *NodeTree) resetExhausted() { +func (nt *nodeTree) resetExhausted() { for _, na := range nt.tree { na.lastIndex = 0 } nt.zoneIndex = 0 } -// Next returns the name of the next node. NodeTree iterates over zones and in each zone iterates +// next returns the name of the next node. NodeTree iterates over zones and in each zone iterates // over nodes in a round robin fashion. -func (nt *NodeTree) Next() string { - nt.mu.Lock() - defer nt.mu.Unlock() +func (nt *nodeTree) next() string { if len(nt.zones) == 0 { return "" } @@ -185,10 +168,3 @@ func (nt *NodeTree) Next() string { } } } - -// NumNodes returns the number of nodes. 
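Illustrative sketch of the zone round-robin that nodeTree.next() implements, now unexported and synchronized by schedulerCache (not part of the vendored patch): nodes are interleaved across zones so consecutive picks spread over failure domains. The helper below is a simplified stand-in that drops the lastIndex/exhausted bookkeeping.

package main

import "fmt"

// roundRobin interleaves node names across zones the way nodeTree.next() does,
// visiting each zone in turn and taking one node per pass.
func roundRobin(zones map[string][]string, order []string) []string {
	var out []string
	idx := make(map[string]int, len(order))
	remaining := 0
	for _, z := range order {
		remaining += len(zones[z])
	}
	for remaining > 0 {
		for _, z := range order {
			if i := idx[z]; i < len(zones[z]) {
				out = append(out, zones[z][i])
				idx[z] = i + 1
				remaining--
			}
		}
	}
	return out
}

func main() {
	zones := map[string][]string{
		"us-east1-a": {"n1", "n2"},
		"us-east1-b": {"n3"},
		"us-east1-c": {"n4", "n5"},
	}
	order := []string{"us-east1-a", "us-east1-b", "us-east1-c"}
	fmt.Println(roundRobin(zones, order)) // [n1 n3 n4 n2 n5]
}
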
-func (nt *NodeTree) NumNodes() int { - nt.mu.RLock() - defer nt.mu.RUnlock() - return nt.numNodes -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/BUILD similarity index 63% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/BUILD index 6b91c8b6c3e..ed96d8444d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/BUILD @@ -6,22 +6,20 @@ load( "go_test", ) -go_library( - name = "go_default_library", - srcs = [ - "dbus.go", - "doc.go", - "fake_dbus.go", - ], - importpath = "k8s.io/kubernetes/pkg/util/dbus", - deps = ["//vendor/github.com/godbus/dbus:go_default_library"], -) - go_test( name = "go_default_test", - srcs = ["dbus_test.go"], + srcs = ["heap_test.go"], embed = [":go_default_library"], - deps = ["//vendor/github.com/godbus/dbus:go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["heap.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/internal/heap", + deps = [ + "//pkg/scheduler/metrics:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/heap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/heap.go similarity index 86% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/heap.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/heap.go index 13d6b2ffd13..49bf9d12388 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/heap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/heap/heap.go @@ -18,7 +18,7 @@ limitations under the License. // as cache.heap, however, this heap does not perform synchronization. It leaves // synchronization to the SchedulingQueue. -package util +package heap import ( "container/heap" @@ -41,9 +41,9 @@ type itemKeyValue struct { obj interface{} } -// heapData is an internal struct that implements the standard heap interface +// data is an internal struct that implements the standard heap interface // and keeps the data stored in the heap. -type heapData struct { +type data struct { // items is a map from key of the objects to the objects and their index. // We depend on the property that items in the map are in the queue and vice versa. items map[string]*heapItem @@ -56,16 +56,16 @@ type heapData struct { // should be deterministic. keyFunc KeyFunc // lessFunc is used to compare two objects in the heap. - lessFunc LessFunc + lessFunc lessFunc } var ( - _ = heap.Interface(&heapData{}) // heapData is a standard heap + _ = heap.Interface(&data{}) // heapData is a standard heap ) // Less compares two objects and returns true if the first one should go // in front of the second one in the heap. -func (h *heapData) Less(i, j int) bool { +func (h *data) Less(i, j int) bool { if i > len(h.queue) || j > len(h.queue) { return false } @@ -81,11 +81,11 @@ func (h *heapData) Less(i, j int) bool { } // Len returns the number of items in the Heap. -func (h *heapData) Len() int { return len(h.queue) } +func (h *data) Len() int { return len(h.queue) } // Swap implements swapping of two elements in the heap. 
This is a part of standard // heap interface and should never be called directly. -func (h *heapData) Swap(i, j int) { +func (h *data) Swap(i, j int) { h.queue[i], h.queue[j] = h.queue[j], h.queue[i] item := h.items[h.queue[i]] item.index = i @@ -94,7 +94,7 @@ func (h *heapData) Swap(i, j int) { } // Push is supposed to be called by heap.Push only. -func (h *heapData) Push(kv interface{}) { +func (h *data) Push(kv interface{}) { keyValue := kv.(*itemKeyValue) n := len(h.queue) h.items[keyValue.key] = &heapItem{keyValue.obj, n} @@ -102,7 +102,7 @@ func (h *heapData) Push(kv interface{}) { } // Pop is supposed to be called by heap.Pop only. -func (h *heapData) Pop() interface{} { +func (h *data) Pop() interface{} { key := h.queue[len(h.queue)-1] h.queue = h.queue[0 : len(h.queue)-1] item, ok := h.items[key] @@ -115,7 +115,7 @@ func (h *heapData) Pop() interface{} { } // Peek is supposed to be called by heap.Peek only. -func (h *heapData) Peek() interface{} { +func (h *data) Peek() interface{} { if len(h.queue) > 0 { return h.items[h.queue[0]].obj } @@ -127,7 +127,7 @@ func (h *heapData) Peek() interface{} { type Heap struct { // data stores objects and has a queue that keeps their ordering according // to the heap invariant. - data *heapData + data *data // metricRecorder updates the counter when elements of a heap get added or // removed, and it does nothing if it's nil metricRecorder metrics.MetricRecorder @@ -239,15 +239,15 @@ func (h *Heap) Len() int { return len(h.data.queue) } -// NewHeap returns a Heap which can be used to queue up items to process. -func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { - return NewHeapWithRecorder(keyFn, lessFn, nil) +// New returns a Heap which can be used to queue up items to process. +func New(keyFn KeyFunc, lessFn lessFunc) *Heap { + return NewWithRecorder(keyFn, lessFn, nil) } -// NewHeapWithRecorder wraps an optional metricRecorder to compose a Heap object. -func NewHeapWithRecorder(keyFn KeyFunc, lessFn LessFunc, metricRecorder metrics.MetricRecorder) *Heap { +// NewWithRecorder wraps an optional metricRecorder to compose a Heap object. +func NewWithRecorder(keyFn KeyFunc, lessFn lessFunc, metricRecorder metrics.MetricRecorder) *Heap { return &Heap{ - data: &heapData{ + data: &data{ items: map[string]*heapItem{}, queue: []string{}, keyFunc: keyFn, @@ -256,3 +256,7 @@ func NewHeapWithRecorder(keyFn KeyFunc, lessFn LessFunc, metricRecorder metrics. metricRecorder: metricRecorder, } } + +// lessFunc is a function that receives two items and returns true if the first +// item should be placed before the second one when the list is sorted. 
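Illustrative aside on the renamed data type above (not part of the vendored patch): it satisfies the standard container/heap interface (Len/Less/Swap/Push/Pop) while also keeping a key-to-item map for O(1) lookups. The fragment below shows only the container/heap contract on a toy slice; the map bookkeeping and the lessFunc alias defined just below are omitted.

package main

import (
	"container/heap"
	"fmt"
)

// byPriority implements heap.Interface the same way data does above, but over
// a plain slice of ints so the example stays self-contained.
type byPriority []int

func (h byPriority) Len() int            { return len(h) }
func (h byPriority) Less(i, j int) bool  { return h[i] > h[j] } // highest value first
func (h byPriority) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *byPriority) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *byPriority) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &byPriority{3, 1, 5}
	heap.Init(h)
	heap.Push(h, 10)
	fmt.Println(heap.Pop(h), heap.Pop(h)) // 10 5
}
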
+type lessFunc = func(item1, item2 interface{}) bool diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD index 915e557e4ad..28f60951dd3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD @@ -3,15 +3,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "events.go", "pod_backoff.go", "scheduling_queue.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/internal/queue", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/heap:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -32,15 +35,20 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/events.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/events.go new file mode 100644 index 00000000000..44440f7fb3a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/events.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +// Events that trigger scheduler queue to change. +const ( + // Unknown event + Unknown = "Unknown" + // PodAdd is the event when a new pod is added to API server. + PodAdd = "PodAdd" + // NodeAdd is the event when a new node is added to the cluster. + NodeAdd = "NodeAdd" + // ScheduleAttemptFailure is the event when a schedule attempt fails. 
+ ScheduleAttemptFailure = "ScheduleAttemptFailure" + // BackoffComplete is the event when a pod finishes backoff. + BackoffComplete = "BackoffComplete" + // UnschedulableTimeout is the event when a pod stays in unschedulable for longer than timeout. + UnschedulableTimeout = "UnschedulableTimeout" + // AssignedPodAdd is the event when a pod is added that causes pods with matching affinity terms + // to be more schedulable. + AssignedPodAdd = "AssignedPodAdd" + // AssignedPodUpdate is the event when a pod is updated that causes pods with matching affinity + // terms to be more schedulable. + AssignedPodUpdate = "AssignedPodUpdate" + // AssignedPodDelete is the event when a pod is deleted that causes pods with matching affinity + // terms to be more schedulable. + AssignedPodDelete = "AssignedPodDelete" + // PvAdd is the event when a persistent volume is added in the cluster. + PvAdd = "PvAdd" + // PvUpdate is the event when a persistent volume is updated in the cluster. + PvUpdate = "PvUpdate" + // PvcAdd is the event when a persistent volume claim is added in the cluster. + PvcAdd = "PvcAdd" + // PvcUpdate is the event when a persistent volume claim is updated in the cluster. + PvcUpdate = "PvcUpdate" + // StorageClassAdd is the event when a StorageClass is added in the cluster. + StorageClassAdd = "StorageClassAdd" + // ServiceAdd is the event when a service is added in the cluster. + ServiceAdd = "ServiceAdd" + // ServiceUpdate is the event when a service is updated in the cluster. + ServiceUpdate = "ServiceUpdate" + // ServiceDelete is the event when a service is deleted in the cluster. + ServiceDelete = "ServiceDelete" + // CSINodeAdd is the event when a CSI node is added in the cluster. + CSINodeAdd = "CSINodeAdd" + // CSINodeUpdate is the event when a CSI node is updated in the cluster. + CSINodeUpdate = "CSINodeUpdate" + // NodeSpecUnschedulableChange is the event when unschedulable node spec is changed. + NodeSpecUnschedulableChange = "NodeSpecUnschedulableChange" + // NodeAllocatableChange is the event when node allocatable is changed. + NodeAllocatableChange = "NodeAllocatableChange" + // NodeLabelsChange is the event when node label is changed. + NodeLabelChange = "NodeLabelChange" + // NodeTaintsChange is the event when node taint is changed. + NodeTaintChange = "NodeTaintChange" + // NodeConditionChange is the event when node condition is changed. + NodeConditionChange = "NodeConditionChange" +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go index 14e22aaafb8..74e25c1b0cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go @@ -15,7 +15,7 @@ limitations under the License. */ // This file contains structures that implement scheduling queue types. -// Scheduling queues hold pods waiting to be scheduled. This file implements a +// Scheduling queues hold pods waiting to be scheduled. This file implements a/ // priority queue which has two sub queues. One sub-queue holds pods that are // being considered for scheduling. This is called activeQ. Another queue holds // pods that are already tried and are determined to be unschedulable. 
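Illustrative sketch (hypothetical wiring, not taken from the patch): event handlers pass one of these constants to the queue's MoveAllToActiveOrBackoffQueue method, shown further down, so queue churn can be attributed to a cause in the scheduler_queue_incoming_pods metric. The mover interface and handler below are invented; the real handlers live elsewhere in the scheduler and may differ in detail.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog"
)

// mover is the one method of SchedulingQueue this sketch needs.
type mover interface {
	MoveAllToActiveOrBackoffQueue(event string)
}

// onNodeAdd returns a hypothetical informer AddFunc: when a node appears,
// unschedulable pods are re-queued and the move is labelled with the NodeAdd
// event defined above.
func onNodeAdd(q mover) func(obj interface{}) {
	return func(obj interface{}) {
		node, ok := obj.(*v1.Node)
		if !ok {
			return
		}
		klog.V(3).Infof("node %q added; moving unschedulable pods to the active/backoff queues", node.Name)
		q.MoveAllToActiveOrBackoffQueue("NodeAdd") // i.e. the NodeAdd constant
	}
}
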
The latter @@ -31,46 +31,58 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/heap" "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/util" ) -var ( +const ( + // If the pod stays in unschedulableQ longer than the unschedulableQTimeInterval, + // the pod will be moved from unschedulableQ to activeQ. + unschedulableQTimeInterval = 60 * time.Second + queueClosed = "scheduling queue is closed" ) -// If the pod stays in unschedulableQ longer than the unschedulableQTimeInterval, -// the pod will be moved from unschedulableQ to activeQ. -const unschedulableQTimeInterval = 60 * time.Second +const ( + // DefaultPodInitialBackoffDuration is the default value for the initial backoff duration + // for unschedulable pods. To change the default podInitialBackoffDurationSeconds used by the + // scheduler, update the ComponentConfig value in defaults.go + DefaultPodInitialBackoffDuration time.Duration = 1 * time.Second + // DefaultPodMaxBackoffDuration is the default value for the max backoff duration + // for unschedulable pods. To change the default podMaxBackoffDurationSeconds used by the + // scheduler, update the ComponentConfig value in defaults.go + DefaultPodMaxBackoffDuration time.Duration = 10 * time.Second +) // SchedulingQueue is an interface for a queue to store pods waiting to be scheduled. // The interface follows a pattern similar to cache.FIFO and cache.Heap and // makes it easy to use those data structures as a SchedulingQueue. type SchedulingQueue interface { Add(pod *v1.Pod) error - AddIfNotPresent(pod *v1.Pod) error // AddUnschedulableIfNotPresent adds an unschedulable pod back to scheduling queue. // The podSchedulingCycle represents the current scheduling cycle number which can be // returned by calling SchedulingCycle(). - AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error + AddUnschedulableIfNotPresent(pod *framework.PodInfo, podSchedulingCycle int64) error // SchedulingCycle returns the current number of scheduling cycle which is // cached by scheduling queue. Normally, incrementing this number whenever // a pod is popped (e.g. called Pop()) is enough. SchedulingCycle() int64 // Pop removes the head of the queue and returns it. It blocks if the // queue is empty and waits until a new item is added to the queue. - Pop() (*v1.Pod, error) + Pop() (*framework.PodInfo, error) Update(oldPod, newPod *v1.Pod) error Delete(pod *v1.Pod) error - MoveAllToActiveQueue() + MoveAllToActiveOrBackoffQueue(event string) AssignedPodAdded(pod *v1.Pod) AssignedPodUpdated(pod *v1.Pod) NominatedPodsForNode(nodeName string) []*v1.Pod @@ -88,8 +100,8 @@ type SchedulingQueue interface { } // NewSchedulingQueue initializes a priority queue as a new scheduling queue. -func NewSchedulingQueue(stop <-chan struct{}, fwk framework.Framework) SchedulingQueue { - return NewPriorityQueue(stop, fwk) +func NewSchedulingQueue(stop <-chan struct{}, fwk framework.Framework, opts ...Option) SchedulingQueue { + return NewPriorityQueue(stop, fwk, opts...) } // NominatedNodeName returns nominated node name of a Pod. 
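Illustrative note on the two new defaults above (not part of the vendored patch): they bound the per-pod backoff window managed by the queue's PodBackoffMap. The arithmetic sketch below assumes the usual doubling-with-cap policy; the exact growth rule lives in pod_backoff.go and is not reproduced in this hunk.

package main

import (
	"fmt"
	"time"
)

// backoffDuration doubles the initial duration per failed attempt and caps it,
// which is the policy the defaults above parameterize. The formula is assumed,
// not copied, here.
func backoffDuration(initial, max time.Duration, attempts int) time.Duration {
	d := initial
	for i := 1; i < attempts; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	for attempts := 1; attempts <= 6; attempts++ {
		fmt.Printf("attempt %d -> backoff %v\n", attempts, backoffDuration(time.Second, 10*time.Second, attempts))
	}
	// attempt 1 -> 1s, 2 -> 2s, 3 -> 4s, 4 -> 8s, 5 -> 10s, 6 -> 10s
}
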
@@ -115,10 +127,10 @@ type PriorityQueue struct { // activeQ is heap structure that scheduler actively looks at to find pods to // schedule. Head of heap is the highest priority pod. - activeQ *util.Heap + activeQ *heap.Heap // podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff // are popped from this heap before the scheduler looks at activeQ - podBackoffQ *util.Heap + podBackoffQ *heap.Heap // unschedulableQ holds pods that have been tried and determined unschedulable. unschedulableQ *UnschedulablePodsMap // nominatedPods is a structures that stores pods which are nominated to run @@ -138,8 +150,44 @@ type PriorityQueue struct { closed bool } +type priorityQueueOptions struct { + clock util.Clock + podInitialBackoffDuration time.Duration + podMaxBackoffDuration time.Duration +} + +// Option configures a PriorityQueue +type Option func(*priorityQueueOptions) + +// WithClock sets clock for PriorityQueue, the default clock is util.RealClock. +func WithClock(clock util.Clock) Option { + return func(o *priorityQueueOptions) { + o.clock = clock + } +} + +// WithPodInitialBackoffDuration sets pod initial backoff duration for PriorityQueue, +func WithPodInitialBackoffDuration(duration time.Duration) Option { + return func(o *priorityQueueOptions) { + o.podInitialBackoffDuration = duration + } +} + +// WithPodMaxBackoffDuration sets pod max backoff duration for PriorityQueue, +func WithPodMaxBackoffDuration(duration time.Duration) Option { + return func(o *priorityQueueOptions) { + o.podMaxBackoffDuration = duration + } +} + +var defaultPriorityQueueOptions = priorityQueueOptions{ + clock: util.RealClock{}, + podInitialBackoffDuration: DefaultPodInitialBackoffDuration, + podMaxBackoffDuration: DefaultPodMaxBackoffDuration, +} + // Making sure that PriorityQueue implements SchedulingQueue. -var _ = SchedulingQueue(&PriorityQueue{}) +var _ SchedulingQueue = &PriorityQueue{} // newPodInfoNoTimestamp builds a PodInfo object without timestamp. func newPodInfoNoTimestamp(pod *v1.Pod) *framework.PodInfo { @@ -154,18 +202,22 @@ func newPodInfoNoTimestamp(pod *v1.Pod) *framework.PodInfo { func activeQComp(podInfo1, podInfo2 interface{}) bool { pInfo1 := podInfo1.(*framework.PodInfo) pInfo2 := podInfo2.(*framework.PodInfo) - prio1 := util.GetPodPriority(pInfo1.Pod) - prio2 := util.GetPodPriority(pInfo2.Pod) + prio1 := pod.GetPodPriority(pInfo1.Pod) + prio2 := pod.GetPodPriority(pInfo2.Pod) return (prio1 > prio2) || (prio1 == prio2 && pInfo1.Timestamp.Before(pInfo2.Timestamp)) } // NewPriorityQueue creates a PriorityQueue object. -func NewPriorityQueue(stop <-chan struct{}, fwk framework.Framework) *PriorityQueue { - return NewPriorityQueueWithClock(stop, util.RealClock{}, fwk) -} +func NewPriorityQueue( + stop <-chan struct{}, + fwk framework.Framework, + opts ...Option, +) *PriorityQueue { + options := defaultPriorityQueueOptions + for _, opt := range opts { + opt(&options) + } -// NewPriorityQueueWithClock creates a PriorityQueue which uses the passed clock for time. 
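Illustrative sketch of the functional-options construction introduced above (not part of the vendored patch; the queue package is internal to the scheduler, so this only compiles inside the k8s.io/kubernetes tree, and the durations are arbitrary example values):

package example

import (
	"time"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	queue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
)

// newBackoffTunedQueue builds a scheduling queue with non-default backoff
// bounds via the new Option values; unspecified options keep their defaults
// (RealClock, DefaultPodInitialBackoffDuration, DefaultPodMaxBackoffDuration).
func newBackoffTunedQueue(stop <-chan struct{}, fwk framework.Framework) queue.SchedulingQueue {
	return queue.NewSchedulingQueue(
		stop,
		fwk,
		queue.WithPodInitialBackoffDuration(500*time.Millisecond),
		queue.WithPodMaxBackoffDuration(30*time.Second),
	)
}
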
-func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock, fwk framework.Framework) *PriorityQueue { comp := activeQComp if fwk != nil { if queueSortFunc := fwk.QueueSortFunc(); queueSortFunc != nil { @@ -179,16 +231,16 @@ func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock, fwk frame } pq := &PriorityQueue{ - clock: clock, + clock: options.clock, stop: stop, - podBackoff: NewPodBackoffMap(1*time.Second, 10*time.Second), - activeQ: util.NewHeapWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()), + podBackoff: NewPodBackoffMap(options.podInitialBackoffDuration, options.podMaxBackoffDuration), + activeQ: heap.NewWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()), unschedulableQ: newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()), nominatedPods: newNominatedPodMap(), moveRequestCycle: -1, } pq.cond.L = &pq.lock - pq.podBackoffQ = util.NewHeapWithRecorder(podInfoKeyFunc, pq.podsCompareBackoffCompleted, metrics.NewBackoffPodsRecorder()) + pq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, pq.podsCompareBackoffCompleted, metrics.NewBackoffPodsRecorder()) pq.run() @@ -219,38 +271,13 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error { if err := p.podBackoffQ.Delete(pInfo); err == nil { klog.Errorf("Error: pod %v/%v is already in the podBackoff queue.", pod.Namespace, pod.Name) } + metrics.SchedulerQueueIncomingPods.WithLabelValues("active", PodAdd).Inc() p.nominatedPods.add(pod, "") p.cond.Broadcast() return nil } -// AddIfNotPresent adds a pod to the active queue if it is not present in any of -// the queues. If it is present in any, it doesn't do any thing. -func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { - p.lock.Lock() - defer p.lock.Unlock() - if p.unschedulableQ.get(pod) != nil { - return nil - } - - pInfo := p.newPodInfo(pod) - if _, exists, _ := p.activeQ.Get(pInfo); exists { - return nil - } - if _, exists, _ := p.podBackoffQ.Get(pInfo); exists { - return nil - } - err := p.activeQ.Add(pInfo) - if err != nil { - klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) - } else { - p.nominatedPods.add(pod, "") - p.cond.Broadcast() - } - return err -} - // nsNameForPod returns a namespacedname for a pod func nsNameForPod(pod *v1.Pod) ktypes.NamespacedName { return ktypes.NamespacedName{ @@ -297,14 +324,16 @@ func (p *PriorityQueue) SchedulingCycle() int64 { // the queue, unless it is already in the queue. Normally, PriorityQueue puts // unschedulable pods in `unschedulableQ`. But if there has been a recent move // request, then the pod is put in `podBackoffQ`. -func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error { +func (p *PriorityQueue) AddUnschedulableIfNotPresent(pInfo *framework.PodInfo, podSchedulingCycle int64) error { p.lock.Lock() defer p.lock.Unlock() + pod := pInfo.Pod if p.unschedulableQ.get(pod) != nil { return fmt.Errorf("pod is already present in unschedulableQ") } - pInfo := p.newPodInfo(pod) + // Refresh the timestamp since the pod is re-added. 
+ pInfo.Timestamp = p.clock.Now() if _, exists, _ := p.activeQ.Get(pInfo); exists { return fmt.Errorf("pod is already present in the activeQ") } @@ -321,8 +350,10 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingC if err := p.podBackoffQ.Add(pInfo); err != nil { return fmt.Errorf("error adding pod %v to the backoff queue: %v", pod.Name, err) } + metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", ScheduleAttemptFailure).Inc() } else { p.unschedulableQ.addOrUpdate(pInfo) + metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", ScheduleAttemptFailure).Inc() } p.nominatedPods.add(pod, "") @@ -334,7 +365,6 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingC func (p *PriorityQueue) flushBackoffQCompleted() { p.lock.Lock() defer p.lock.Unlock() - for { rawPodInfo := p.podBackoffQ.Peek() if rawPodInfo == nil { @@ -346,6 +376,7 @@ func (p *PriorityQueue) flushBackoffQCompleted() { klog.Errorf("Unable to find backoff value for pod %v in backoffQ", nsNameForPod(pod)) p.podBackoffQ.Pop() p.activeQ.Add(rawPodInfo) + metrics.SchedulerQueueIncomingPods.WithLabelValues("active", BackoffComplete).Inc() defer p.cond.Broadcast() continue } @@ -359,6 +390,7 @@ func (p *PriorityQueue) flushBackoffQCompleted() { return } p.activeQ.Add(rawPodInfo) + metrics.SchedulerQueueIncomingPods.WithLabelValues("active", BackoffComplete).Inc() defer p.cond.Broadcast() } } @@ -379,14 +411,14 @@ func (p *PriorityQueue) flushUnschedulableQLeftover() { } if len(podsToMove) > 0 { - p.movePodsToActiveQueue(podsToMove) + p.movePodsToActiveOrBackoffQueue(podsToMove, UnschedulableTimeout) } } // Pop removes the head of the active queue and returns it. It blocks if the // activeQ is empty and waits until a new item is added to the queue. It // increments scheduling cycle when a pod is popped. -func (p *PriorityQueue) Pop() (*v1.Pod, error) { +func (p *PriorityQueue) Pop() (*framework.PodInfo, error) { p.lock.Lock() defer p.lock.Unlock() for p.activeQ.Len() == 0 { @@ -403,8 +435,9 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { return nil, err } pInfo := obj.(*framework.PodInfo) + pInfo.Attempts++ p.schedulingCycle++ - return pInfo.Pod, err + return pInfo, err } // isPodUpdated checks if the pod is updated in a way that it may have become @@ -433,19 +466,15 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { // If the pod is already in the active queue, just update it there. if oldPodInfo, exists, _ := p.activeQ.Get(oldPodInfo); exists { p.nominatedPods.update(oldPod, newPod) - newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.Timestamp = oldPodInfo.(*framework.PodInfo).Timestamp - err := p.activeQ.Update(newPodInfo) + err := p.activeQ.Update(updatePod(oldPodInfo, newPod)) return err } // If the pod is in the backoff queue, update it there. if oldPodInfo, exists, _ := p.podBackoffQ.Get(oldPodInfo); exists { p.nominatedPods.update(oldPod, newPod) - p.podBackoffQ.Delete(newPodInfoNoTimestamp(oldPod)) - newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.Timestamp = oldPodInfo.(*framework.PodInfo).Timestamp - err := p.activeQ.Add(newPodInfo) + p.podBackoffQ.Delete(oldPodInfo) + err := p.activeQ.Add(updatePod(oldPodInfo, newPod)) if err == nil { p.cond.Broadcast() } @@ -456,20 +485,18 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { // If the pod is in the unschedulable queue, updating it may make it schedulable. 
if usPodInfo := p.unschedulableQ.get(newPod); usPodInfo != nil { p.nominatedPods.update(oldPod, newPod) - newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.Timestamp = usPodInfo.Timestamp if isPodUpdated(oldPod, newPod) { // If the pod is updated reset backoff p.clearPodBackoff(newPod) p.unschedulableQ.delete(usPodInfo.Pod) - err := p.activeQ.Add(newPodInfo) + err := p.activeQ.Add(updatePod(usPodInfo, newPod)) if err == nil { p.cond.Broadcast() } return err } // Pod is already in unschedulable queue and hasnt updated, no need to backoff again - p.unschedulableQ.addOrUpdate(newPodInfo) + p.unschedulableQ.addOrUpdate(updatePod(usPodInfo, newPod)) return nil } // If pod is not in any of the queues, we put it in the active queue. @@ -500,7 +527,7 @@ func (p *PriorityQueue) Delete(pod *v1.Pod) error { // may make pending pods with matching affinity terms schedulable. func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) { p.lock.Lock() - p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) + p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodAdd) p.lock.Unlock() } @@ -508,60 +535,42 @@ func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) { // may make pending pods with matching affinity terms schedulable. func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { p.lock.Lock() - p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) + p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate) p.lock.Unlock() } -// MoveAllToActiveQueue moves all pods from unschedulableQ to activeQ. This +// MoveAllToActiveOrBackoffQueue moves all pods from unschedulableQ to activeQ. This // function adds all pods and then signals the condition variable to ensure that // if Pop() is waiting for an item, it receives it after all the pods are in the // queue and the head is the highest priority pod. 
-func (p *PriorityQueue) MoveAllToActiveQueue() { +func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(event string) { p.lock.Lock() defer p.lock.Unlock() - - // There is a chance of errors when adding pods to other queues, - // we make a temporary slice to store the pods, - // since the probability is low, we set its len to 0 - addErrorPods := make([]*framework.PodInfo, 0) - + unschedulablePods := make([]*framework.PodInfo, 0, len(p.unschedulableQ.podInfoMap)) for _, pInfo := range p.unschedulableQ.podInfoMap { - pod := pInfo.Pod - if p.isPodBackingOff(pod) { - if err := p.podBackoffQ.Add(pInfo); err != nil { - klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) - addErrorPods = append(addErrorPods, pInfo) - } - } else { - if err := p.activeQ.Add(pInfo); err != nil { - klog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) - addErrorPods = append(addErrorPods, pInfo) - } - } - } - p.unschedulableQ.clear() - // Adding pods that we could not move to Active queue or Backoff queue back to the Unschedulable queue - for _, podInfo := range addErrorPods { - p.unschedulableQ.addOrUpdate(podInfo) + unschedulablePods = append(unschedulablePods, pInfo) } + p.movePodsToActiveOrBackoffQueue(unschedulablePods, event) p.moveRequestCycle = p.schedulingCycle p.cond.Broadcast() } // NOTE: this function assumes lock has been acquired in caller -func (p *PriorityQueue) movePodsToActiveQueue(podInfoList []*framework.PodInfo) { +func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework.PodInfo, event string) { for _, pInfo := range podInfoList { pod := pInfo.Pod if p.isPodBackingOff(pod) { if err := p.podBackoffQ.Add(pInfo); err != nil { klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) } else { + metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc() p.unschedulableQ.delete(pod) } } else { if err := p.activeQ.Add(pInfo); err != nil { klog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) } else { + metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event).Inc() p.unschedulableQ.delete(pod) } } @@ -665,18 +674,20 @@ func (p *PriorityQueue) NumUnschedulablePods() int { // newPodInfo builds a PodInfo object. func (p *PriorityQueue) newPodInfo(pod *v1.Pod) *framework.PodInfo { - if p.clock == nil { - return &framework.PodInfo{ - Pod: pod, - } - } - + now := p.clock.Now() return &framework.PodInfo{ - Pod: pod, - Timestamp: p.clock.Now(), + Pod: pod, + Timestamp: now, + InitialAttemptTimestamp: now, } } +func updatePod(oldPodInfo interface{}, newPod *v1.Pod) *framework.PodInfo { + pInfo := oldPodInfo.(*framework.PodInfo) + pInfo.Pod = newPod + return pInfo +} + // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. 
type UnschedulablePodsMap struct { @@ -823,12 +834,12 @@ func newNominatedPodMap() *nominatedPodMap { // MakeNextPodFunc returns a function to retrieve the next pod from a given // scheduling queue -func MakeNextPodFunc(queue SchedulingQueue) func() *v1.Pod { - return func() *v1.Pod { - pod, err := queue.Pop() +func MakeNextPodFunc(queue SchedulingQueue) func() *framework.PodInfo { + return func() *framework.PodInfo { + podInfo, err := queue.Pop() if err == nil { - klog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name) - return pod + klog.V(4).Infof("About to try and schedule pod %v/%v", podInfo.Pod.Namespace, podInfo.Pod.Name) + return podInfo } klog.Errorf("Error while retrieving next pod from scheduling queue: %v", err) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/BUILD new file mode 100644 index 00000000000..3e8d3f4ca68 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["listers.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/listers", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/scheduler/listers/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go new file mode 100644 index 00000000000..984493f0e17 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package listers + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// PodFilter is a function to filter a pod. If pod passed return true else return false. +type PodFilter func(*v1.Pod) bool + +// PodLister interface represents anything that can list pods for a scheduler. +type PodLister interface { + // Returns the list of pods. + List(labels.Selector) ([]*v1.Pod, error) + // This is similar to "List()", but the returned slice does not + // contain pods that don't pass `podFilter`. + FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) +} + +// NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name. 
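The new listers package above defines the PodLister contract that cache and snapshot implementations satisfy. A sketch of a hypothetical in-memory implementation (staticPodLister is not part of the vendored code) showing how List is just FilteredList with an always-true filter:

package fakelisters

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"

	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)

// staticPodLister is a hypothetical in-memory PodLister used only to show how
// the two listing methods relate.
type staticPodLister struct {
	pods []*v1.Pod
}

var _ schedulerlisters.PodLister = &staticPodLister{}

// List returns every pod whose labels match the selector.
func (l *staticPodLister) List(selector labels.Selector) ([]*v1.Pod, error) {
	return l.FilteredList(func(*v1.Pod) bool { return true }, selector)
}

// FilteredList additionally drops pods rejected by podFilter.
func (l *staticPodLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
	var out []*v1.Pod
	for _, p := range l.pods {
		if podFilter(p) && selector.Matches(labels.Set(p.Labels)) {
			out = append(out, p)
		}
	}
	return out, nil
}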
+type NodeInfoLister interface { + // Returns the list of NodeInfos. + List() ([]*schedulernodeinfo.NodeInfo, error) + // Returns the list of NodeInfos of nodes with pods with affinity terms. + HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) + // Returns the NodeInfo of the given node name. + Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) +} + +// SharedLister groups scheduler-specific listers. +type SharedLister interface { + Pods() PodLister + NodeInfos() NodeInfoLister +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD index f698d44ad16..7cd56dda5b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD @@ -10,10 +10,9 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/scheduler/metrics", deps = [ - "//pkg/controller/volume/scheduling:go_default_library", + "//pkg/controller/volume/scheduling/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go index 95687dbf35d..a98d59cfb70 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go @@ -20,11 +20,9 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" - volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling" + volumeschedulingmetrics "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics" ) const ( @@ -60,11 +58,11 @@ var ( StabilityLevel: metrics.ALPHA, }, []string{"result"}) // PodScheduleSuccesses counts how many pods were scheduled. - PodScheduleSuccesses = scheduleAttempts.With(prometheus.Labels{"result": "scheduled"}) + PodScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"}) // PodScheduleFailures counts how many pods could not be scheduled. - PodScheduleFailures = scheduleAttempts.With(prometheus.Labels{"result": "unschedulable"}) + PodScheduleFailures = scheduleAttempts.With(metrics.Labels{"result": "unschedulable"}) // PodScheduleErrors counts how many pods could not be scheduled due to a scheduler error. - PodScheduleErrors = scheduleAttempts.With(prometheus.Labels{"result": "error"}) + PodScheduleErrors = scheduleAttempts.With(metrics.Labels{"result": "error"}) SchedulingLatency = metrics.NewSummaryVec( &metrics.SummaryOpts{ Subsystem: SchedulerSubsystem, @@ -81,11 +79,12 @@ var ( &metrics.SummaryOpts{ Subsystem: SchedulerSubsystem, Name: DeprecatedSchedulingLatencyName, - Help: "(Deprecated) Scheduling latency in seconds split by sub-parts of the scheduling operation", + Help: "Scheduling latency in seconds split by sub-parts of the scheduling operation", // Make the sliding window of 5h. // TODO: The value for this should be based on some SLI definition (long term). 
- MaxAge: 5 * time.Hour, - StabilityLevel: metrics.ALPHA, + MaxAge: 5 * time.Hour, + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, []string{OperationLabel}, ) @@ -94,17 +93,18 @@ var ( Subsystem: SchedulerSubsystem, Name: "e2e_scheduling_duration_seconds", Help: "E2e scheduling latency in seconds (scheduling algorithm + binding)", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) DeprecatedE2eSchedulingLatency = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "e2e_scheduling_latency_microseconds", - Help: "(Deprecated) E2e scheduling latency in microseconds (scheduling algorithm + binding)", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "e2e_scheduling_latency_microseconds", + Help: "E2e scheduling latency in microseconds (scheduling algorithm + binding)", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) SchedulingAlgorithmLatency = metrics.NewHistogram( @@ -112,17 +112,18 @@ var ( Subsystem: SchedulerSubsystem, Name: "scheduling_algorithm_duration_seconds", Help: "Scheduling algorithm latency in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) DeprecatedSchedulingAlgorithmLatency = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_algorithm_latency_microseconds", - Help: "(Deprecated) Scheduling algorithm latency in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "scheduling_algorithm_latency_microseconds", + Help: "Scheduling algorithm latency in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) SchedulingAlgorithmPredicateEvaluationDuration = metrics.NewHistogram( @@ -130,17 +131,18 @@ var ( Subsystem: SchedulerSubsystem, Name: "scheduling_algorithm_predicate_evaluation_seconds", Help: "Scheduling algorithm predicate evaluation duration in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) DeprecatedSchedulingAlgorithmPredicateEvaluationDuration = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_algorithm_predicate_evaluation", - Help: "(Deprecated) Scheduling algorithm predicate evaluation duration in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "scheduling_algorithm_predicate_evaluation", + Help: "Scheduling algorithm predicate evaluation duration in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) SchedulingAlgorithmPriorityEvaluationDuration = metrics.NewHistogram( @@ -148,35 +150,37 @@ var ( Subsystem: SchedulerSubsystem, Name: "scheduling_algorithm_priority_evaluation_seconds", Help: "Scheduling algorithm priority evaluation duration in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) 
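This file's metric definitions now build exclusively on k8s.io/component-base/metrics: prometheus.Labels and prometheus.ExponentialBuckets become metrics.Labels and metrics.ExponentialBuckets, and the legacy microsecond metrics carry an explicit DeprecatedVersion instead of a "(Deprecated)" prefix in the help text. A hypothetical metric declared in the same style (its subsystem, name, and values are illustrative only):

package example

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// exampleDeprecatedHistogram is a hypothetical metric showing the pattern this
// hunk applies throughout: opts come from component-base/metrics rather than
// prometheus directly, and legacy metrics carry an explicit DeprecatedVersion.
var exampleDeprecatedHistogram = metrics.NewHistogram(
	&metrics.HistogramOpts{
		Subsystem:         "scheduler",
		Name:              "example_latency_microseconds",
		Help:              "Example latency in microseconds",
		Buckets:           metrics.ExponentialBuckets(1000, 2, 15),
		StabilityLevel:    metrics.ALPHA,
		DeprecatedVersion: "1.14.0",
	},
)

func init() {
	// Registration goes through the legacy registry, as in Register() below;
	// DeprecatedVersion marks the metric as deprecated as of that release.
	legacyregistry.MustRegister(exampleDeprecatedHistogram)
}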
DeprecatedSchedulingAlgorithmPriorityEvaluationDuration = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_algorithm_priority_evaluation", - Help: "(Deprecated) Scheduling algorithm priority evaluation duration in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "scheduling_algorithm_priority_evaluation", + Help: "Scheduling algorithm priority evaluation duration in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) - SchedulingAlgorithmPremptionEvaluationDuration = metrics.NewHistogram( + SchedulingAlgorithmPreemptionEvaluationDuration = metrics.NewHistogram( &metrics.HistogramOpts{ Subsystem: SchedulerSubsystem, Name: "scheduling_algorithm_preemption_evaluation_seconds", Help: "Scheduling algorithm preemption evaluation duration in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) - DeprecatedSchedulingAlgorithmPremptionEvaluationDuration = metrics.NewHistogram( + DeprecatedSchedulingAlgorithmPreemptionEvaluationDuration = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_algorithm_preemption_evaluation", - Help: "(Deprecated) Scheduling algorithm preemption evaluation duration in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "scheduling_algorithm_preemption_evaluation", + Help: "Scheduling algorithm preemption evaluation duration in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) BindingLatency = metrics.NewHistogram( @@ -184,24 +188,27 @@ var ( Subsystem: SchedulerSubsystem, Name: "binding_duration_seconds", Help: "Binding latency in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, }, ) DeprecatedBindingLatency = metrics.NewHistogram( &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "binding_latency_microseconds", - Help: "(Deprecated) Binding latency in microseconds", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - StabilityLevel: metrics.ALPHA, + Subsystem: SchedulerSubsystem, + Name: "binding_latency_microseconds", + Help: "Binding latency in microseconds", + Buckets: metrics.ExponentialBuckets(1000, 2, 15), + StabilityLevel: metrics.ALPHA, + DeprecatedVersion: "1.14.0", }, ) - PreemptionVictims = metrics.NewGauge( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "pod_preemption_victims", - Help: "Number of selected preemption victims", + PreemptionVictims = metrics.NewHistogram( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "pod_preemption_victims", + Help: "Number of selected preemption victims", + // we think #victims>50 is pretty rare, therefore [50, +Inf) is considered a single bucket. 
+ Buckets: metrics.LinearBuckets(5, 5, 10), StabilityLevel: metrics.ALPHA, }) PreemptionAttempts = metrics.NewCounter( @@ -211,7 +218,6 @@ var ( Help: "Total preemption attempts in the cluster till now", StabilityLevel: metrics.ALPHA, }) - pendingPods = metrics.NewGaugeVec( &metrics.GaugeOpts{ Subsystem: SchedulerSubsystem, @@ -219,6 +225,81 @@ var ( Help: "Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.", StabilityLevel: metrics.ALPHA, }, []string{"queue"}) + SchedulerGoroutines = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: SchedulerSubsystem, + Name: "scheduler_goroutines", + Help: "Number of running goroutines split by the work they do such as binding.", + StabilityLevel: metrics.ALPHA, + }, []string{"work"}) + + PodSchedulingDuration = metrics.NewHistogram( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "pod_scheduling_duration_seconds", + Help: "E2e latency for a pod being scheduled which may include multiple scheduling attempts.", + // Start with 1ms with the last bucket being [~16s, Inf) + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }) + + PodSchedulingAttempts = metrics.NewHistogram( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "pod_scheduling_attempts", + Help: "Number of attempts to successfully schedule a pod.", + Buckets: metrics.ExponentialBuckets(1, 2, 5), + StabilityLevel: metrics.ALPHA, + }) + + FrameworkExtensionPointDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "framework_extension_point_duration_seconds", + Help: "Latency for running all plugins of a specific extension point.", + // Start with 0.1ms with the last bucket being [~200ms, Inf) + Buckets: metrics.ExponentialBuckets(0.0001, 2, 12), + StabilityLevel: metrics.ALPHA, + }, + []string{"extension_point", "status"}) + + PluginExecutionDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "plugin_execution_duration_seconds", + Help: "Duration for running a plugin at a specific extension point.", + // Start with 0.01ms with the last bucket being [~22ms, Inf). We use a small factor (1.5) + // so that we have better granularity since plugin latency is very sensitive. 
+ Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20), + StabilityLevel: metrics.ALPHA, + }, + []string{"plugin", "extension_point", "status"}) + + SchedulerQueueIncomingPods = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: SchedulerSubsystem, + Name: "queue_incoming_pods_total", + Help: "Number of pods added to scheduling queues by event and queue type.", + StabilityLevel: metrics.ALPHA, + }, []string{"queue", "event"}) + + PermitWaitDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: SchedulerSubsystem, + Name: "permit_wait_duration_seconds", + Help: "Duration of waiting in RunPermitPlugins.", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}) + + CacheSize = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: SchedulerSubsystem, + Name: "scheduler_cache_size", + Help: "Number of nodes, pods, and assumed (bound) pods in the scheduler cache.", + StabilityLevel: metrics.ALPHA, + }, []string{"type"}) metricsList = []metrics.Registerable{ scheduleAttempts, @@ -234,11 +315,19 @@ var ( DeprecatedSchedulingAlgorithmPredicateEvaluationDuration, SchedulingAlgorithmPriorityEvaluationDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationDuration, - SchedulingAlgorithmPremptionEvaluationDuration, - DeprecatedSchedulingAlgorithmPremptionEvaluationDuration, + SchedulingAlgorithmPreemptionEvaluationDuration, + DeprecatedSchedulingAlgorithmPreemptionEvaluationDuration, PreemptionVictims, PreemptionAttempts, pendingPods, + PodSchedulingDuration, + PodSchedulingAttempts, + FrameworkExtensionPointDuration, + PluginExecutionDuration, + SchedulerQueueIncomingPods, + SchedulerGoroutines, + PermitWaitDuration, + CacheSize, } ) @@ -251,23 +340,28 @@ func Register() { for _, metric := range metricsList { legacyregistry.MustRegister(metric) } - volumescheduling.RegisterVolumeSchedulingMetrics() + volumeschedulingmetrics.RegisterVolumeSchedulingMetrics() }) } +// GetGather returns the gatherer. It used by test case outside current package. 
+func GetGather() metrics.Gatherer { + return legacyregistry.DefaultGatherer +} + // ActivePods returns the pending pods metrics with the label active func ActivePods() metrics.GaugeMetric { - return pendingPods.With(prometheus.Labels{"queue": "active"}) + return pendingPods.With(metrics.Labels{"queue": "active"}) } // BackoffPods returns the pending pods metrics with the label backoff func BackoffPods() metrics.GaugeMetric { - return pendingPods.With(prometheus.Labels{"queue": "backoff"}) + return pendingPods.With(metrics.Labels{"queue": "backoff"}) } // UnschedulablePods returns the pending pods metrics with the label unschedulable func UnschedulablePods() metrics.GaugeMetric { - return pendingPods.With(prometheus.Labels{"queue": "unschedulable"}) + return pendingPods.With(metrics.Labels{"queue": "unschedulable"}) } // Reset resets metrics diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/BUILD index ec7644e790a..a33c84cdee3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/BUILD @@ -5,8 +5,6 @@ go_library( srcs = [ "host_ports.go", "node_info.go", - "snapshot.go", - "util.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo", visibility = ["//visibility:public"], @@ -16,7 +14,6 @@ go_library( "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -27,7 +24,6 @@ go_test( srcs = [ "host_ports_test.go", "node_info_test.go", - "util_test.go", ], embed = [":go_default_library"], deps = [ @@ -36,7 +32,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", ], @@ -51,7 +46,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/scheduler/nodeinfo/snapshot:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go index f1e3a9de527..3d7e4d0dcd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go @@ -357,30 +357,6 @@ func (n *NodeInfo) SetTaints(newTaints []v1.Taint) { n.taints = newTaints } -// MemoryPressureCondition returns the memory pressure condition status on this node. -func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.memoryPressureCondition -} - -// DiskPressureCondition returns the disk pressure condition status on this node. 
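Beyond the mechanical prometheus-to-component-base conversion, the hunk adds several new metrics (SchedulerQueueIncomingPods, SchedulerGoroutines, PluginExecutionDuration, and others) plus the GetGather test hook. Hypothetical call sites, mirroring how the scheduler records these metrics elsewhere in this patch; the literal label values are illustrative:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/scheduler/metrics"
)

// recordExampleMetrics sketches how the new metrics are fed; the label values
// ("ExamplePlugin", "PodAdd", ...) are illustrative, not values the scheduler
// necessarily emits.
func recordExampleMetrics(activeQLen int) {
	start := time.Now()

	// HistogramVec keyed by (plugin, extension_point, status).
	metrics.PluginExecutionDuration.
		WithLabelValues("ExamplePlugin", "Filter", "Success").
		Observe(metrics.SinceInSeconds(start))

	// CounterVec keyed by (queue, event), as used by the scheduling queue above.
	metrics.SchedulerQueueIncomingPods.WithLabelValues("active", "PodAdd").Inc()

	// Gauge helper for pending pods in the active queue.
	metrics.ActivePods().Set(float64(activeQLen))
}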
-func (n *NodeInfo) DiskPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.diskPressureCondition -} - -// PIDPressureCondition returns the pid pressure condition status on this node. -func (n *NodeInfo) PIDPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.pidPressureCondition -} - // RequestedResource returns aggregated resource request of pods on this node. func (n *NodeInfo) RequestedResource() Resource { if n == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/BUILD new file mode 100644 index 00000000000..c89f823f782 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["snapshot_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["snapshot.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/listers:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/snapshot.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/snapshot.go new file mode 100644 index 00000000000..46134a76f32 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot/snapshot.go @@ -0,0 +1,196 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshot + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a +// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. 
+type Snapshot struct { + // NodeInfoMap a map of node name to a snapshot of its NodeInfo. + NodeInfoMap map[string]*schedulernodeinfo.NodeInfo + // NodeInfoList is the list of nodes as ordered in the cache's nodeTree. + NodeInfoList []*schedulernodeinfo.NodeInfo + // HavePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. + HavePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo + Generation int64 +} + +var _ schedulerlisters.SharedLister = &Snapshot{} + +// NewEmptySnapshot initializes a Snapshot struct and returns it. +func NewEmptySnapshot() *Snapshot { + return &Snapshot{ + NodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), + } +} + +// NewSnapshot initializes a Snapshot struct and returns it. +func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot { + nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) + havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) + for _, v := range nodeInfoMap { + nodeInfoList = append(nodeInfoList, v) + if len(v.PodsWithAffinity()) > 0 { + havePodsWithAffinityNodeInfoList = append(havePodsWithAffinityNodeInfoList, v) + } + } + + s := NewEmptySnapshot() + s.NodeInfoMap = nodeInfoMap + s.NodeInfoList = nodeInfoList + s.HavePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList + + return s +} + +// CreateNodeInfoMap obtains a list of pods and pivots that list into a map where the keys are node names +// and the values are the aggregated information for that node. +func CreateNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo { + nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo) + for _, pod := range pods { + nodeName := pod.Spec.NodeName + if _, ok := nodeNameToInfo[nodeName]; !ok { + nodeNameToInfo[nodeName] = schedulernodeinfo.NewNodeInfo() + } + nodeNameToInfo[nodeName].AddPod(pod) + } + imageExistenceMap := createImageExistenceMap(nodes) + + for _, node := range nodes { + if _, ok := nodeNameToInfo[node.Name]; !ok { + nodeNameToInfo[node.Name] = schedulernodeinfo.NewNodeInfo() + } + nodeInfo := nodeNameToInfo[node.Name] + nodeInfo.SetNode(node) + nodeInfo.SetImageStates(getNodeImageStates(node, imageExistenceMap)) + } + return nodeNameToInfo +} + +// getNodeImageStates returns the given node's image states based on the given imageExistence map. +func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulernodeinfo.ImageStateSummary { + imageStates := make(map[string]*schedulernodeinfo.ImageStateSummary) + + for _, image := range node.Status.Images { + for _, name := range image.Names { + imageStates[name] = &schedulernodeinfo.ImageStateSummary{ + Size: image.SizeBytes, + NumNodes: len(imageExistenceMap[name]), + } + } + } + return imageStates +} + +// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names. 
+func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String { + imageExistenceMap := make(map[string]sets.String) + for _, node := range nodes { + for _, image := range node.Status.Images { + for _, name := range image.Names { + if _, ok := imageExistenceMap[name]; !ok { + imageExistenceMap[name] = sets.NewString(node.Name) + } else { + imageExistenceMap[name].Insert(node.Name) + } + } + } + } + return imageExistenceMap +} + +// Pods returns a PodLister +func (s *Snapshot) Pods() schedulerlisters.PodLister { + return &podLister{snapshot: s} +} + +// NodeInfos returns a NodeInfoLister. +func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister { + return &nodeInfoLister{snapshot: s} +} + +// ListNodes returns the list of nodes in the snapshot. +func (s *Snapshot) ListNodes() []*v1.Node { + nodes := make([]*v1.Node, 0, len(s.NodeInfoMap)) + for _, n := range s.NodeInfoList { + if n.Node() != nil { + nodes = append(nodes, n.Node()) + } + } + return nodes +} + +type podLister struct { + snapshot *Snapshot +} + +// List returns the list of pods in the snapshot. +func (p *podLister) List(selector labels.Selector) ([]*v1.Pod, error) { + alwaysTrue := func(p *v1.Pod) bool { return true } + return p.FilteredList(alwaysTrue, selector) +} + +// FilteredList returns a filtered list of pods in the snapshot. +func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { + // podFilter is expected to return true for most or all of the pods. We + // can avoid expensive array growth without wasting too much memory by + // pre-allocating capacity. + maxSize := 0 + for _, n := range p.snapshot.NodeInfoMap { + maxSize += len(n.Pods()) + } + pods := make([]*v1.Pod, 0, maxSize) + for _, n := range p.snapshot.NodeInfoMap { + for _, pod := range n.Pods() { + if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { + pods = append(pods, pod) + } + } + } + return pods, nil +} + +type nodeInfoLister struct { + snapshot *Snapshot +} + +// List returns the list of nodes in the snapshot. +func (n *nodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) { + return n.snapshot.NodeInfoList, nil +} + +// HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity +func (n *nodeInfoLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { + return n.snapshot.HavePodsWithAffinityNodeInfoList, nil +} + +// Returns the NodeInfo of the given node name. +func (n *nodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { + if v, ok := n.snapshot.NodeInfoMap[nodeName]; ok { + return v, nil + } + return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/util.go deleted file mode 100644 index bb1fd0ce612..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/util.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
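The new snapshot package above carries the per-cycle view of nodes and pods and implements the SharedLister interfaces from pkg/scheduler/listers. A hypothetical helper assembling a snapshot from plain pod and node slices and consuming it through those interfaces:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"

	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

// buildSnapshot is a hypothetical helper showing how the snapshot can be built
// from plain pod and node lists and then consumed through the SharedLister
// contract defined in pkg/scheduler/listers.
func buildSnapshot() error {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-1"},
	}

	snap := nodeinfosnapshot.NewSnapshot(
		nodeinfosnapshot.CreateNodeInfoMap([]*v1.Pod{pod}, []*v1.Node{node}))

	// NodeInfos() and Pods() satisfy the scheduler's SharedLister interfaces.
	if _, err := snap.NodeInfos().List(); err != nil {
		return err
	}
	_, err := snap.Pods().List(labels.Everything())
	return err
}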
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodeinfo - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names -// and the values are the aggregated information for that node. -func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo { - nodeNameToInfo := make(map[string]*NodeInfo) - for _, pod := range pods { - nodeName := pod.Spec.NodeName - if _, ok := nodeNameToInfo[nodeName]; !ok { - nodeNameToInfo[nodeName] = NewNodeInfo() - } - nodeNameToInfo[nodeName].AddPod(pod) - } - imageExistenceMap := createImageExistenceMap(nodes) - - for _, node := range nodes { - if _, ok := nodeNameToInfo[node.Name]; !ok { - nodeNameToInfo[node.Name] = NewNodeInfo() - } - nodeInfo := nodeNameToInfo[node.Name] - nodeInfo.SetNode(node) - nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap) - } - return nodeNameToInfo -} - -// getNodeImageStates returns the given node's image states based on the given imageExistence map. -func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary { - imageStates := make(map[string]*ImageStateSummary) - - for _, image := range node.Status.Images { - for _, name := range image.Names { - imageStates[name] = &ImageStateSummary{ - Size: image.SizeBytes, - NumNodes: len(imageExistenceMap[name]), - } - } - } - return imageStates -} - -// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names. -func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String { - imageExistenceMap := make(map[string]sets.String) - for _, node := range nodes { - for _, image := range node.Status.Images { - for _, name := range image.Names { - if _, ok := imageExistenceMap[name]; !ok { - imageExistenceMap[name] = sets.NewString(node.Name) - } else { - imageExistenceMap[name].Insert(node.Name) - } - } - } - } - return imageExistenceMap -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go index f22cbfd9521..4e1d522145c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go @@ -17,8 +17,10 @@ limitations under the License. 
package scheduler import ( + "context" "fmt" "io/ioutil" + "math/rand" "os" "time" @@ -28,22 +30,23 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" - appsinformers "k8s.io/client-go/informers/apps/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" - policyinformers "k8s.io/client-go/informers/policy/v1beta1" - storageinformersv1 "k8s.io/client-go/informers/storage/v1" - storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/events" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" - kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + kubefeatures "k8s.io/kubernetes/pkg/features" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" "k8s.io/kubernetes/pkg/scheduler/core" - "k8s.io/kubernetes/pkg/scheduler/factory" + frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -52,8 +55,27 @@ const ( BindTimeoutSeconds = 100 // SchedulerError is the reason recorded for events when an error occurs during scheduling a pod. SchedulerError = "SchedulerError" + // Percentage of framework metrics to be sampled. + frameworkMetricsSamplePercent = 10 ) +// podConditionUpdater updates the condition of a pod based on the passed +// PodCondition +// TODO (ahmad-diaa): Remove type and replace it with scheduler methods +type podConditionUpdater interface { + update(pod *v1.Pod, podCondition *v1.PodCondition) error +} + +// PodPreemptor has methods needed to delete a pod and to update 'NominatedPod' +// field of the preemptor pod. +// TODO (ahmad-diaa): Remove type and replace it with scheduler methods +type podPreemptor interface { + getUpdatedPod(pod *v1.Pod) (*v1.Pod, error) + deletePod(pod *v1.Pod) error + setNominatedNodeName(pod *v1.Pod, nominatedNode string) error + removeNominatedNodeName(pod *v1.Pod) error +} + // Scheduler watches for new unscheduled pods. It attempts to find // nodes that they fit on and writes bindings back to the api server. type Scheduler struct { @@ -62,14 +84,14 @@ type Scheduler struct { SchedulerCache internalcache.Cache Algorithm core.ScheduleAlgorithm - GetBinder func(pod *v1.Pod) factory.Binder + GetBinder func(pod *v1.Pod) Binder // PodConditionUpdater is used only in case of scheduling errors. If we succeed // with scheduling, PodScheduled condition will be updated in apiserver in /bind // handler so that binding and setting PodCondition it is atomic. - PodConditionUpdater factory.PodConditionUpdater + podConditionUpdater podConditionUpdater // PodPreemptor is used to evict pods and update 'NominatedNode' field of // the preemptor pod. - PodPreemptor factory.PodPreemptor + podPreemptor podPreemptor // Framework runs scheduler plugins at configured extension points. 
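scheduler.go now hides pod-status updates and preemption behind the small unexported podConditionUpdater and podPreemptor interfaces. The concrete podConditionUpdaterImpl wired up later in New lives elsewhere in the package; the sketch below only illustrates the shape such an implementation could take, reusing podutil.UpdatePodCondition from the imports above:

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// conditionUpdater is an illustrative stand-in for the scheduler's own
// podConditionUpdaterImpl; treat it as a sketch, not the vendored code.
type conditionUpdater struct {
	client clientset.Interface
}

func (c *conditionUpdater) update(pod *v1.Pod, condition *v1.PodCondition) error {
	// UpdatePodCondition returns true only if the condition actually changed,
	// so an API call is made only when there is something to persist.
	if podutil.UpdatePodCondition(&pod.Status, condition) {
		_, err := c.client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
		return err
	}
	return nil
}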
Framework framework.Framework @@ -77,15 +99,11 @@ type Scheduler struct { // is available. We don't use a channel for this, because scheduling // a pod may take some amount of time and we don't want pods to get // stale while they sit in a channel. - NextPod func() *v1.Pod - - // WaitForCacheSync waits for scheduler cache to populate. - // It returns true if it was successful, false if the controller should shutdown. - WaitForCacheSync func() bool + NextPod func() *framework.PodInfo // Error is called if there is an error. It is passed the pod in // question, and the error - Error func(*v1.Pod, error) + Error func(*framework.PodInfo, error) // Recorder is the EventRecorder to use Recorder events.EventRecorder @@ -101,6 +119,12 @@ type Scheduler struct { // SchedulingQueue holds pods to be scheduled SchedulingQueue internalqueue.SchedulingQueue + + scheduledPodsHasSynced func() bool + + // The final configuration of the framework. + Plugins schedulerapi.Plugins + PluginConfig []schedulerapi.PluginConfig } // Cache returns the cache in scheduler for test to check the data in scheduler. @@ -110,10 +134,20 @@ func (sched *Scheduler) Cache() internalcache.Cache { type schedulerOptions struct { schedulerName string + schedulerAlgorithmSource schedulerapi.SchedulerAlgorithmSource hardPodAffinitySymmetricWeight int32 disablePreemption bool percentageOfNodesToScore int32 bindTimeoutSeconds int64 + podInitialBackoffSeconds int64 + podMaxBackoffSeconds int64 + // Default registry contains all in-tree plugins + frameworkDefaultRegistry framework.Registry + // This registry contains out of tree plugins to be merged with default registry. + frameworkOutOfTreeRegistry framework.Registry + frameworkConfigProducerRegistry *frameworkplugins.ConfigProducerRegistry + frameworkPlugins *schedulerapi.Plugins + frameworkPluginConfig []schedulerapi.PluginConfig } // Option configures a Scheduler @@ -126,6 +160,13 @@ func WithName(schedulerName string) Option { } } +// WithAlgorithmSource sets schedulerAlgorithmSource for Scheduler, the default is a source with DefaultProvider. +func WithAlgorithmSource(source schedulerapi.SchedulerAlgorithmSource) Option { + return func(o *schedulerOptions) { + o.schedulerAlgorithmSource = source + } +} + // WithHardPodAffinitySymmetricWeight sets hardPodAffinitySymmetricWeight for Scheduler, the default value is 1 func WithHardPodAffinitySymmetricWeight(hardPodAffinitySymmetricWeight int32) Option { return func(o *schedulerOptions) { @@ -154,63 +195,148 @@ func WithBindTimeoutSeconds(bindTimeoutSeconds int64) Option { } } +// WithFrameworkDefaultRegistry sets the framework's default registry. This is only used in integration tests. +func WithFrameworkDefaultRegistry(registry framework.Registry) Option { + return func(o *schedulerOptions) { + o.frameworkDefaultRegistry = registry + } +} + +// WithFrameworkOutOfTreeRegistry sets the registry for out-of-tree plugins. Those plugins +// will be appended to the default registry. +func WithFrameworkOutOfTreeRegistry(registry framework.Registry) Option { + return func(o *schedulerOptions) { + o.frameworkOutOfTreeRegistry = registry + } +} + +// WithFrameworkConfigProducerRegistry sets the framework plugin producer registry. +func WithFrameworkConfigProducerRegistry(registry *frameworkplugins.ConfigProducerRegistry) Option { + return func(o *schedulerOptions) { + o.frameworkConfigProducerRegistry = registry + } +} + +// WithFrameworkPlugins sets the plugins that the framework should be configured with. 
+func WithFrameworkPlugins(plugins *schedulerapi.Plugins) Option { + return func(o *schedulerOptions) { + o.frameworkPlugins = plugins + } +} + +// WithFrameworkPluginConfig sets the PluginConfig slice that the framework should be configured with. +func WithFrameworkPluginConfig(pluginConfig []schedulerapi.PluginConfig) Option { + return func(o *schedulerOptions) { + o.frameworkPluginConfig = pluginConfig + } +} + +// WithPodInitialBackoffSeconds sets podInitialBackoffSeconds for Scheduler, the default value is 1 +func WithPodInitialBackoffSeconds(podInitialBackoffSeconds int64) Option { + return func(o *schedulerOptions) { + o.podInitialBackoffSeconds = podInitialBackoffSeconds + } +} + +// WithPodMaxBackoffSeconds sets podMaxBackoffSeconds for Scheduler, the default value is 10 +func WithPodMaxBackoffSeconds(podMaxBackoffSeconds int64) Option { + return func(o *schedulerOptions) { + o.podMaxBackoffSeconds = podMaxBackoffSeconds + } +} + var defaultSchedulerOptions = schedulerOptions{ - schedulerName: v1.DefaultSchedulerName, - hardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight, - disablePreemption: false, - percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore, - bindTimeoutSeconds: BindTimeoutSeconds, + schedulerName: v1.DefaultSchedulerName, + schedulerAlgorithmSource: schedulerapi.SchedulerAlgorithmSource{ + Provider: defaultAlgorithmSourceProviderName(), + }, + hardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight, + disablePreemption: false, + percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore, + bindTimeoutSeconds: BindTimeoutSeconds, + podInitialBackoffSeconds: int64(internalqueue.DefaultPodInitialBackoffDuration.Seconds()), + podMaxBackoffSeconds: int64(internalqueue.DefaultPodMaxBackoffDuration.Seconds()), + frameworkConfigProducerRegistry: frameworkplugins.NewDefaultConfigProducerRegistry(), + // The plugins and pluginConfig options are currently nil because we currently don't have + // "default" plugins. All plugins that we run through the framework currently come from two + // sources: 1) specified in component config, in which case those two options should be + // set using their corresponding With* functions, 2) predicate/priority-mapped plugins, which + // pluginConfigProducerRegistry contains a mapping for and produces their configurations. + // TODO(ahg-g) Once predicates and priorities are migrated to natively run as plugins, the + // below two parameters will be populated accordingly. 
+ frameworkPlugins: nil, + frameworkPluginConfig: nil, } // New returns a Scheduler func New(client clientset.Interface, - nodeInformer coreinformers.NodeInformer, + informerFactory informers.SharedInformerFactory, podInformer coreinformers.PodInformer, - pvInformer coreinformers.PersistentVolumeInformer, - pvcInformer coreinformers.PersistentVolumeClaimInformer, - replicationControllerInformer coreinformers.ReplicationControllerInformer, - replicaSetInformer appsinformers.ReplicaSetInformer, - statefulSetInformer appsinformers.StatefulSetInformer, - serviceInformer coreinformers.ServiceInformer, - pdbInformer policyinformers.PodDisruptionBudgetInformer, - storageClassInformer storageinformersv1.StorageClassInformer, - csiNodeInformer storageinformersv1beta1.CSINodeInformer, recorder events.EventRecorder, - schedulerAlgorithmSource kubeschedulerconfig.SchedulerAlgorithmSource, stopCh <-chan struct{}, - registry framework.Registry, - plugins *kubeschedulerconfig.Plugins, - pluginConfig []kubeschedulerconfig.PluginConfig, - opts ...func(o *schedulerOptions)) (*Scheduler, error) { + opts ...Option) (*Scheduler, error) { + + stopEverything := stopCh + if stopEverything == nil { + stopEverything = wait.NeverStop + } options := defaultSchedulerOptions for _, opt := range opts { opt(&options) } - // Set up the configurator which can create schedulers from configs. - configurator := factory.NewConfigFactory(&factory.ConfigFactoryArgs{ - Client: client, - NodeInformer: nodeInformer, - PodInformer: podInformer, - PvInformer: pvInformer, - PvcInformer: pvcInformer, - ReplicationControllerInformer: replicationControllerInformer, - ReplicaSetInformer: replicaSetInformer, - StatefulSetInformer: statefulSetInformer, - ServiceInformer: serviceInformer, - PdbInformer: pdbInformer, - StorageClassInformer: storageClassInformer, - CSINodeInformer: csiNodeInformer, - HardPodAffinitySymmetricWeight: options.hardPodAffinitySymmetricWeight, - DisablePreemption: options.disablePreemption, - PercentageOfNodesToScore: options.percentageOfNodesToScore, - BindTimeoutSeconds: options.bindTimeoutSeconds, - Registry: registry, - Plugins: plugins, - PluginConfig: pluginConfig, - }) - var config *factory.Config - source := schedulerAlgorithmSource + + schedulerCache := internalcache.New(30*time.Second, stopEverything) + volumeBinder := volumebinder.NewVolumeBinder( + client, + informerFactory.Core().V1().Nodes(), + informerFactory.Storage().V1().CSINodes(), + informerFactory.Core().V1().PersistentVolumeClaims(), + informerFactory.Core().V1().PersistentVolumes(), + informerFactory.Storage().V1().StorageClasses(), + time.Duration(options.bindTimeoutSeconds)*time.Second, + ) + + registry := options.frameworkDefaultRegistry + if registry == nil { + registry = frameworkplugins.NewDefaultRegistry(&frameworkplugins.RegistryArgs{ + VolumeBinder: volumeBinder, + }) + } + registry.Merge(options.frameworkOutOfTreeRegistry) + + snapshot := nodeinfosnapshot.NewEmptySnapshot() + + configurator := &Configurator{ + client: client, + informerFactory: informerFactory, + podInformer: podInformer, + volumeBinder: volumeBinder, + schedulerCache: schedulerCache, + StopEverything: stopEverything, + hardPodAffinitySymmetricWeight: options.hardPodAffinitySymmetricWeight, + disablePreemption: options.disablePreemption, + percentageOfNodesToScore: options.percentageOfNodesToScore, + bindTimeoutSeconds: options.bindTimeoutSeconds, + podInitialBackoffSeconds: options.podInitialBackoffSeconds, + podMaxBackoffSeconds: options.podMaxBackoffSeconds, + 
enableNonPreempting: utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority), + registry: registry, + plugins: options.frameworkPlugins, + pluginConfig: options.frameworkPluginConfig, + pluginConfigProducerRegistry: options.frameworkConfigProducerRegistry, + nodeInfoSnapshot: snapshot, + algorithmFactoryArgs: AlgorithmFactoryArgs{ + SharedLister: snapshot, + InformerFactory: informerFactory, + VolumeBinder: volumeBinder, + HardPodAffinitySymmetricWeight: options.hardPodAffinitySymmetricWeight, + }, + configProducerArgs: &frameworkplugins.ConfigProducerArgs{}, + } + + var sched *Scheduler + source := options.schedulerAlgorithmSource switch { case source.Provider != nil: // Create the config from a named algorithm provider. @@ -218,7 +344,7 @@ func New(client clientset.Interface, if err != nil { return nil, fmt.Errorf("couldn't create scheduler using provider %q: %v", *source.Provider, err) } - config = sc + sched = sc case source.Policy != nil: // Create the config from a user specified policy source. policy := &schedulerapi.Policy{} @@ -236,19 +362,20 @@ func New(client clientset.Interface, if err != nil { return nil, fmt.Errorf("couldn't create scheduler from policy: %v", err) } - config = sc + sched = sc default: return nil, fmt.Errorf("unsupported algorithm source: %v", source) } + metrics.Register() // Additional tweaks to the config produced by the configurator. - config.Recorder = recorder - config.DisablePreemption = options.disablePreemption - config.StopEverything = stopCh - - // Create the scheduler. - sched := NewFromConfig(config) - - AddAllEventHandlers(sched, options.schedulerName, nodeInformer, podInformer, pvInformer, pvcInformer, serviceInformer, storageClassInformer, csiNodeInformer) + sched.Recorder = recorder + sched.DisablePreemption = options.disablePreemption + sched.StopEverything = stopEverything + sched.podConditionUpdater = &podConditionUpdaterImpl{client} + sched.podPreemptor = &podPreemptorImpl{client} + sched.scheduledPodsHasSynced = podInformer.Informer().HasSynced + + AddAllEventHandlers(sched, options.schedulerName, informerFactory, podInformer) return sched, nil } @@ -263,7 +390,7 @@ func initPolicyFromFile(policyFile string, policy *schedulerapi.Policy) error { if err != nil { return fmt.Errorf("couldn't read policy config: %v", err) } - err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy) + err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), []byte(data), policy) if err != nil { return fmt.Errorf("invalid policy: %v", err) } @@ -271,60 +398,40 @@ func initPolicyFromFile(policyFile string, policy *schedulerapi.Policy) error { } // initPolicyFromConfigMap initialize policy from configMap -func initPolicyFromConfigMap(client clientset.Interface, policyRef *kubeschedulerconfig.SchedulerPolicyConfigMapSource, policy *schedulerapi.Policy) error { +func initPolicyFromConfigMap(client clientset.Interface, policyRef *schedulerapi.SchedulerPolicyConfigMapSource, policy *schedulerapi.Policy) error { // Use a policy serialized in a config map value. 
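New now takes a single SharedInformerFactory plus variadic Options in place of the long list of per-resource informers, and builds the cache, volume binder, plugin registry, and Configurator itself. A hypothetical call site under those assumptions:

package example

import (
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/events"

	"k8s.io/kubernetes/pkg/scheduler"
)

// newScheduler is a hypothetical wrapper around the reworked constructor: one
// shared informer factory plus functional options replace the former list of
// per-resource informer arguments.
func newScheduler(client clientset.Interface, recorder events.EventRecorder, stopCh <-chan struct{}) (*scheduler.Scheduler, error) {
	informerFactory := informers.NewSharedInformerFactory(client, 0)
	podInformer := informerFactory.Core().V1().Pods()

	return scheduler.New(
		client,
		informerFactory,
		podInformer,
		recorder,
		stopCh,
		scheduler.WithName("example-scheduler"),
		scheduler.WithPodInitialBackoffSeconds(1),
		scheduler.WithPodMaxBackoffSeconds(10),
		scheduler.WithBindTimeoutSeconds(100),
	)
}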
policyConfigMap, err := client.CoreV1().ConfigMaps(policyRef.Namespace).Get(policyRef.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("couldn't get policy config map %s/%s: %v", policyRef.Namespace, policyRef.Name, err) } - data, found := policyConfigMap.Data[kubeschedulerconfig.SchedulerPolicyConfigMapKey] + data, found := policyConfigMap.Data[schedulerapi.SchedulerPolicyConfigMapKey] if !found { - return fmt.Errorf("missing policy config map value at key %q", kubeschedulerconfig.SchedulerPolicyConfigMapKey) + return fmt.Errorf("missing policy config map value at key %q", schedulerapi.SchedulerPolicyConfigMapKey) } - err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy) + err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), []byte(data), policy) if err != nil { return fmt.Errorf("invalid policy: %v", err) } return nil } -// NewFromConfig returns a new scheduler using the provided Config. -func NewFromConfig(config *factory.Config) *Scheduler { - metrics.Register() - return &Scheduler{ - SchedulerCache: config.SchedulerCache, - Algorithm: config.Algorithm, - GetBinder: config.GetBinder, - PodConditionUpdater: config.PodConditionUpdater, - PodPreemptor: config.PodPreemptor, - Framework: config.Framework, - NextPod: config.NextPod, - WaitForCacheSync: config.WaitForCacheSync, - Error: config.Error, - Recorder: config.Recorder, - StopEverything: config.StopEverything, - VolumeBinder: config.VolumeBinder, - DisablePreemption: config.DisablePreemption, - SchedulingQueue: config.SchedulingQueue, - } -} - -// Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately. -func (sched *Scheduler) Run() { - if !sched.WaitForCacheSync() { +// Run begins watching and scheduling. It waits for cache to be synced, then starts scheduling and blocked until the context is done. +func (sched *Scheduler) Run(ctx context.Context) { + if !cache.WaitForCacheSync(ctx.Done(), sched.scheduledPodsHasSynced) { return } - go wait.Until(sched.scheduleOne, 0, sched.StopEverything) + wait.UntilWithContext(ctx, sched.scheduleOne, 0) } // recordFailedSchedulingEvent records an event for the pod that indicates the // pod has failed to schedule. // NOTE: This function modifies "pod". "pod" should be copied before being passed. -func (sched *Scheduler) recordSchedulingFailure(pod *v1.Pod, err error, reason string, message string) { - sched.Error(pod, err) +func (sched *Scheduler) recordSchedulingFailure(podInfo *framework.PodInfo, err error, reason string, message string) { + sched.Error(podInfo, err) + pod := podInfo.Pod sched.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message) - if err := sched.PodConditionUpdater.Update(pod, &v1.PodCondition{ + if err := sched.podConditionUpdater.update(pod, &v1.PodCondition{ Type: v1.PodScheduled, Status: v1.ConditionFalse, Reason: reason, @@ -334,29 +441,17 @@ func (sched *Scheduler) recordSchedulingFailure(pod *v1.Pod, err error, reason s } } -// schedule implements the scheduling algorithm and returns the suggested result(host, -// evaluated nodes number,feasible nodes number). 
-func (sched *Scheduler) schedule(pod *v1.Pod, pluginContext *framework.PluginContext) (core.ScheduleResult, error) { - result, err := sched.Algorithm.Schedule(pod, pluginContext) - if err != nil { - pod = pod.DeepCopy() - sched.recordSchedulingFailure(pod, err, v1.PodReasonUnschedulable, err.Error()) - return core.ScheduleResult{}, err - } - return result, err -} - // preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible. // If it succeeds, it adds the name of the node where preemption has happened to the pod spec. // It returns the node name and an error if any. -func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, scheduleErr error) (string, error) { - preemptor, err := sched.PodPreemptor.GetUpdatedPod(preemptor) +func (sched *Scheduler) preempt(ctx context.Context, state *framework.CycleState, fwk framework.Framework, preemptor *v1.Pod, scheduleErr error) (string, error) { + preemptor, err := sched.podPreemptor.getUpdatedPod(preemptor) if err != nil { klog.Errorf("Error getting the updated preemptor pod object: %v", err) return "", err } - node, victims, nominatedPodsToClear, err := sched.Algorithm.Preempt(preemptor, scheduleErr) + node, victims, nominatedPodsToClear, err := sched.Algorithm.Preempt(ctx, state, preemptor, scheduleErr) if err != nil { klog.Errorf("Error preempting victims to make room for %v/%v: %v", preemptor.Namespace, preemptor.Name, err) return "", err @@ -370,7 +465,7 @@ func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, sche sched.SchedulingQueue.UpdateNominatedPodForNode(preemptor, nodeName) // Make a call to update nominated node name of the pod on the API server. - err = sched.PodPreemptor.SetNominatedNodeName(preemptor, nodeName) + err = sched.podPreemptor.setNominatedNodeName(preemptor, nodeName) if err != nil { klog.Errorf("Error in preemption process. Cannot set 'NominatedPod' on pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err) sched.SchedulingQueue.DeleteNominatedPodIfExists(preemptor) @@ -378,7 +473,7 @@ func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, sche } for _, victim := range victims { - if err := sched.PodPreemptor.DeletePod(victim); err != nil { + if err := sched.podPreemptor.deletePod(victim); err != nil { klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) return "", err } @@ -389,7 +484,7 @@ func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, sche sched.Recorder.Eventf(victim, preemptor, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName) } - metrics.PreemptionVictims.Set(float64(len(victims))) + metrics.PreemptionVictims.Observe(float64(len(victims))) } // Clearing nominated pods should happen outside of "if node != nil". Node could // be nil when a pod with nominated node name is eligible to preempt again, @@ -397,7 +492,7 @@ func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, sche // function of generic_scheduler.go returns the pod itself for removal of // the 'NominatedPod' field. for _, p := range nominatedPodsToClear { - rErr := sched.PodPreemptor.RemoveNominatedNodeName(p) + rErr := sched.podPreemptor.removeNominatedNodeName(p) if rErr != nil { klog.Errorf("Cannot remove 'NominatedPod' field of pod: %v", rErr) // We do not return as this error is not critical. 
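Run now blocks until its context is cancelled instead of starting its own goroutine and watching StopEverything, and the same context is threaded through preempt and the scheduling cycle. A hypothetical caller that owns the goroutine and the cancellation:

package example

import (
	"context"

	"k8s.io/kubernetes/pkg/scheduler"
)

// startScheduler is a hypothetical caller of the reworked Run: the caller owns
// the goroutine and stops scheduling by cancelling the context.
func startScheduler(ctx context.Context, sched *scheduler.Scheduler) context.CancelFunc {
	runCtx, cancel := context.WithCancel(ctx)
	go sched.Run(runCtx) // blocks until runCtx is cancelled
	return cancel
}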
@@ -406,18 +501,6 @@ func (sched *Scheduler) preempt(fwk framework.Framework, preemptor *v1.Pod, sche return nodeName, err } -// assumeVolumes will update the volume cache with the chosen bindings -// -// This function modifies assumed if volume binding is required. -func (sched *Scheduler) assumeVolumes(assumed *v1.Pod, host string) (allBound bool, err error) { - allBound, err = sched.VolumeBinder.Binder.AssumePodVolumes(assumed, host) - if err != nil { - sched.recordSchedulingFailure(assumed, err, SchedulerError, - fmt.Sprintf("AssumePodVolumes failed: %v", err)) - } - return -} - // bindVolumes will make the API update with the assumed bindings and wait until // the PV controller has completely finished the binding operation. // @@ -434,7 +517,6 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } - sched.recordSchedulingFailure(assumed, err, "VolumeBindingFailed", err.Error()) return err } @@ -453,14 +535,6 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { if err := sched.SchedulerCache.AssumePod(assumed); err != nil { klog.Errorf("scheduler cache AssumePod failed: %v", err) - - // This is most probably result of a BUG in retrying logic. - // We report an error here so that pod scheduling can be retried. - // This relies on the fact that Error will check if the pod has been bound - // to a node and if so will not add it back to the unscheduled pods queue - // (otherwise this would cause an infinite loop). - sched.recordSchedulingFailure(assumed, err, SchedulerError, - fmt.Sprintf("AssumePod failed: %v", err)) return err } // if "assumed" is a nominated pod, we should remove it from internal cache @@ -473,9 +547,9 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // bind binds a pod to a given node defined in a binding object. We expect this to run asynchronously, so we // handle binding metrics internally. -func (sched *Scheduler) bind(assumed *v1.Pod, targetNode string, pluginContext *framework.PluginContext) error { +func (sched *Scheduler) bind(ctx context.Context, assumed *v1.Pod, targetNode string, state *framework.CycleState) error { bindingStart := time.Now() - bindStatus := sched.Framework.RunBindPlugins(pluginContext, assumed, targetNode) + bindStatus := sched.Framework.RunBindPlugins(ctx, state, assumed, targetNode) var err error if !bindStatus.IsSuccess() { if bindStatus.Code() == framework.Skip { @@ -513,14 +587,15 @@ func (sched *Scheduler) bind(assumed *v1.Pod, targetNode string, pluginContext * } // scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. -func (sched *Scheduler) scheduleOne() { +func (sched *Scheduler) scheduleOne(ctx context.Context) { fwk := sched.Framework - pod := sched.NextPod() + podInfo := sched.NextPod() // pod could be nil when schedulerQueue is closed - if pod == nil { + if podInfo == nil || podInfo.Pod == nil { return } + pod := podInfo.Pod if pod.DeletionTimestamp != nil { sched.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) @@ -531,10 +606,14 @@ func (sched *Scheduler) scheduleOne() { // Synchronously attempt to find a fit for the pod. 
start := time.Now() - pluginContext := framework.NewPluginContext() - scheduleResult, err := sched.schedule(pod, pluginContext) + state := framework.NewCycleState() + state.SetRecordFrameworkMetrics(rand.Intn(100) < frameworkMetricsSamplePercent) + schedulingCycleCtx, cancel := context.WithCancel(ctx) + defer cancel() + scheduleResult, err := sched.Algorithm.Schedule(schedulingCycleCtx, state, pod) if err != nil { - // schedule() may have failed because the pod would not fit on any host, so we try to + sched.recordSchedulingFailure(podInfo.DeepCopy(), err, v1.PodReasonUnschedulable, err.Error()) + // Schedule() may have failed because the pod would not fit on any host, so we try to // preempt, with the expectation that the next time the pod is tried for scheduling it // will fit due to the preemption. It is also possible that a different pod will schedule // into the resources that were preempted, but this is harmless. @@ -544,10 +623,10 @@ func (sched *Scheduler) scheduleOne() { " No preemption is performed.") } else { preemptionStartTime := time.Now() - sched.preempt(fwk, pod, fitError) + sched.preempt(schedulingCycleCtx, state, fwk, pod, fitError) metrics.PreemptionAttempts.Inc() - metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInSeconds(preemptionStartTime)) - metrics.DeprecatedSchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) + metrics.SchedulingAlgorithmPreemptionEvaluationDuration.Observe(metrics.SinceInSeconds(preemptionStartTime)) + metrics.DeprecatedSchedulingAlgorithmPreemptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) metrics.DeprecatedSchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) } @@ -565,7 +644,8 @@ func (sched *Scheduler) scheduleOne() { metrics.DeprecatedSchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. - assumedPod := pod.DeepCopy() + assumedPodInfo := podInfo.DeepCopy() + assumedPod := assumedPodInfo.Pod // Assume volumes first before assuming the pod. // @@ -574,16 +654,17 @@ func (sched *Scheduler) scheduleOne() { // Otherwise, binding of volumes is started after the pod is assumed, but before pod binding. // // This function modifies 'assumedPod' if volume binding is required. - allBound, err := sched.assumeVolumes(assumedPod, scheduleResult.SuggestedHost) + allBound, err := sched.VolumeBinder.Binder.AssumePodVolumes(assumedPod, scheduleResult.SuggestedHost) if err != nil { - klog.Errorf("error assuming volumes: %v", err) + sched.recordSchedulingFailure(assumedPodInfo, err, SchedulerError, + fmt.Sprintf("AssumePodVolumes failed: %v", err)) metrics.PodScheduleErrors.Inc() return } // Run "reserve" plugins. 
- if sts := fwk.RunReservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() { - sched.recordSchedulingFailure(assumedPod, sts.AsError(), SchedulerError, sts.Message()) + if sts := fwk.RunReservePlugins(schedulingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() { + sched.recordSchedulingFailure(assumedPodInfo, sts.AsError(), SchedulerError, sts.Message()) metrics.PodScheduleErrors.Inc() return } @@ -591,28 +672,26 @@ func (sched *Scheduler) scheduleOne() { // assume modifies `assumedPod` by setting NodeName=scheduleResult.SuggestedHost err = sched.assume(assumedPod, scheduleResult.SuggestedHost) if err != nil { - klog.Errorf("error assuming pod: %v", err) + // This is most probably result of a BUG in retrying logic. + // We report an error here so that pod scheduling can be retried. + // This relies on the fact that Error will check if the pod has been bound + // to a node and if so will not add it back to the unscheduled pods queue + // (otherwise this would cause an infinite loop). + sched.recordSchedulingFailure(assumedPodInfo, err, SchedulerError, fmt.Sprintf("AssumePod failed: %v", err)) metrics.PodScheduleErrors.Inc() // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + fwk.RunUnreservePlugins(schedulingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) return } // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). go func() { - // Bind volumes first before Pod - if !allBound { - err := sched.bindVolumes(assumedPod) - if err != nil { - klog.Errorf("error binding volumes: %v", err) - metrics.PodScheduleErrors.Inc() - // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) - return - } - } + bindingCycleCtx, cancel := context.WithCancel(ctx) + defer cancel() + metrics.SchedulerGoroutines.WithLabelValues("binding").Inc() + defer metrics.SchedulerGoroutines.WithLabelValues("binding").Dec() // Run "permit" plugins. - permitStatus := fwk.RunPermitPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + permitStatus := fwk.RunPermitPlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) if !permitStatus.IsSuccess() { var reason string if permitStatus.IsUnschedulable() { @@ -626,63 +705,102 @@ func (sched *Scheduler) scheduleOne() { klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) - sched.recordSchedulingFailure(assumedPod, permitStatus.AsError(), reason, permitStatus.Message()) + fwk.RunUnreservePlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) + sched.recordSchedulingFailure(assumedPodInfo, permitStatus.AsError(), reason, permitStatus.Message()) return } + // Bind volumes first before Pod + if !allBound { + err := sched.bindVolumes(assumedPod) + if err != nil { + sched.recordSchedulingFailure(assumedPodInfo, err, "VolumeBindingFailed", err.Error()) + metrics.PodScheduleErrors.Inc() + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) + return + } + } + // Run "prebind" plugins. 
- preBindStatus := fwk.RunPreBindPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + preBindStatus := fwk.RunPreBindPlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) if !preBindStatus.IsSuccess() { var reason string - if preBindStatus.IsUnschedulable() { - metrics.PodScheduleFailures.Inc() - reason = v1.PodReasonUnschedulable - } else { - metrics.PodScheduleErrors.Inc() - reason = SchedulerError - } + metrics.PodScheduleErrors.Inc() + reason = SchedulerError if forgetErr := sched.Cache().ForgetPod(assumedPod); forgetErr != nil { klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) - sched.recordSchedulingFailure(assumedPod, preBindStatus.AsError(), reason, preBindStatus.Message()) + fwk.RunUnreservePlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) + sched.recordSchedulingFailure(assumedPodInfo, preBindStatus.AsError(), reason, preBindStatus.Message()) return } - err := sched.bind(assumedPod, scheduleResult.SuggestedHost, pluginContext) + err := sched.bind(bindingCycleCtx, assumedPod, scheduleResult.SuggestedHost, state) metrics.E2eSchedulingLatency.Observe(metrics.SinceInSeconds(start)) metrics.DeprecatedE2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { - klog.Errorf("error binding pod: %v", err) metrics.PodScheduleErrors.Inc() // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) - sched.recordSchedulingFailure(assumedPod, err, SchedulerError, fmt.Sprintf("Binding rejected: %v", err)) + fwk.RunUnreservePlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) + sched.recordSchedulingFailure(assumedPodInfo, err, SchedulerError, fmt.Sprintf("Binding rejected: %v", err)) } else { // Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2. if klog.V(2) { - node, _ := sched.Cache().GetNodeInfo(scheduleResult.SuggestedHost) - klog.Infof("pod %v/%v is bound successfully on node %q, %d nodes evaluated, %d nodes were found feasible. Bound node resource: %q.", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes, nodeResourceString(node)) + klog.Infof("pod %v/%v is bound successfully on node %q, %d nodes evaluated, %d nodes were found feasible.", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes) } metrics.PodScheduleSuccesses.Inc() + metrics.PodSchedulingAttempts.Observe(float64(podInfo.Attempts)) + metrics.PodSchedulingDuration.Observe(metrics.SinceInSeconds(podInfo.InitialAttemptTimestamp)) // Run "postbind" plugins. - fwk.RunPostBindPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + fwk.RunPostBindPlugins(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) } }() } -// nodeResourceString returns a string representation of node resources. 
-func nodeResourceString(n *v1.Node) string { - if n == nil { - return "N/A" +type podConditionUpdaterImpl struct { + Client clientset.Interface +} + +func (p *podConditionUpdaterImpl) update(pod *v1.Pod, condition *v1.PodCondition) error { + klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, condition.Status, condition.Reason) + if podutil.UpdatePodCondition(&pod.Status, condition) { + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + return err + } + return nil +} + +type podPreemptorImpl struct { + Client clientset.Interface +} + +func (p *podPreemptorImpl) getUpdatedPod(pod *v1.Pod) (*v1.Pod, error) { + return p.Client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) +} + +func (p *podPreemptorImpl) deletePod(pod *v1.Pod) error { + return p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) +} + +func (p *podPreemptorImpl) setNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error { + podCopy := pod.DeepCopy() + podCopy.Status.NominatedNodeName = nominatedNodeName + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(podCopy) + return err +} + +func (p *podPreemptorImpl) removeNominatedNodeName(pod *v1.Pod) error { + if len(pod.Status.NominatedNodeName) == 0 { + return nil } - return fmt.Sprintf("Capacity: %s; Allocatable: %s.", resourceString(&n.Status.Capacity), resourceString(&n.Status.Allocatable)) + return p.setNominatedNodeName(pod, "") } -func resourceString(r *v1.ResourceList) string { - return fmt.Sprintf("CPU<%s>|Memory<%s>|Pods<%s>|StorageEphemeral<%s>", r.Cpu().String(), r.Memory().String(), r.Pods().String(), r.StorageEphemeral().String()) +func defaultAlgorithmSourceProviderName() *string { + provider := schedulerapi.SchedulerDefaultProviderName + return &provider } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD index 3d1a71aa14b..4518ec6c45e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD @@ -10,13 +10,13 @@ go_test( name = "go_default_test", srcs = [ "error_channel_test.go", - "heap_test.go", "utils_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/apis/scheduling:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) @@ -26,17 +26,14 @@ go_library( srcs = [ "clock.go", "error_channel.go", - "heap.go", "utils.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/util", deps = [ - "//pkg/apis/scheduling:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/metrics:go_default_library", + "//pkg/api/v1/pod:go_default_library", + "//pkg/scheduler/apis/extender/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go index 1a46bfcf754..c1d5c3f5d97 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go @@ -17,14 +17,13 @@ limitations under the License. package util import ( - "sort" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" - "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/scheduler/api" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1" ) // GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair @@ -49,17 +48,6 @@ func GetPodFullName(pod *v1.Pod) string { return pod.Name + "_" + pod.Namespace } -// GetPodPriority returns priority of the given pod. -func GetPodPriority(pod *v1.Pod) int32 { - if pod.Spec.Priority != nil { - return *pod.Spec.Priority - } - // When priority of a running pod is nil, it means it was created at a time - // that there was no global default priority class and the priority class - // name of the pod was empty. So, we resolve to the static default priority. - return scheduling.DefaultPriorityWhenNoDefaultClassExists -} - // GetPodStartTime returns start time of the given pod. func GetPodStartTime(pod *v1.Pod) *metav1.Time { if pod.Status.StartTime != nil { @@ -73,9 +61,13 @@ func GetPodStartTime(pod *v1.Pod) *metav1.Time { return &metav1.Time{Time: time.Now()} } +// lessFunc is a function that receives two items and returns true if the first +// item should be placed before the second one when the list is sorted. +type lessFunc = func(item1, item2 interface{}) bool + // GetEarliestPodStartTime returns the earliest start time of all pods that // have the highest priority among all victims. -func GetEarliestPodStartTime(victims *api.Victims) *metav1.Time { +func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time { if len(victims.Pods) == 0 { // should not reach here. klog.Errorf("victims.Pods is empty. Should not reach here.") @@ -83,15 +75,15 @@ func GetEarliestPodStartTime(victims *api.Victims) *metav1.Time { } earliestPodStartTime := GetPodStartTime(victims.Pods[0]) - highestPriority := GetPodPriority(victims.Pods[0]) + maxPriority := podutil.GetPodPriority(victims.Pods[0]) for _, pod := range victims.Pods { - if GetPodPriority(pod) == highestPriority { + if podutil.GetPodPriority(pod) == maxPriority { if GetPodStartTime(pod).Before(earliestPodStartTime) { earliestPodStartTime = GetPodStartTime(pod) } - } else if GetPodPriority(pod) > highestPriority { - highestPriority = GetPodPriority(pod) + } else if podutil.GetPodPriority(pod) > maxPriority { + maxPriority = podutil.GetPodPriority(pod) earliestPodStartTime = GetPodStartTime(pod) } } @@ -99,43 +91,15 @@ func GetEarliestPodStartTime(victims *api.Victims) *metav1.Time { return earliestPodStartTime } -// SortableList is a list that implements sort.Interface. -type SortableList struct { - Items []interface{} - CompFunc LessFunc -} - -// LessFunc is a function that receives two items and returns true if the first -// item should be placed before the second one when the list is sorted. 
-type LessFunc func(item1, item2 interface{}) bool - -var _ = sort.Interface(&SortableList{}) - -func (l *SortableList) Len() int { return len(l.Items) } - -func (l *SortableList) Less(i, j int) bool { - return l.CompFunc(l.Items[i], l.Items[j]) -} - -func (l *SortableList) Swap(i, j int) { - l.Items[i], l.Items[j] = l.Items[j], l.Items[i] -} - -// Sort sorts the items in the list using the given CompFunc. Item1 is placed -// before Item2 when CompFunc(Item1, Item2) returns true. -func (l *SortableList) Sort() { - sort.Sort(l) -} - // MoreImportantPod return true when priority of the first pod is higher than // the second one. If two pods' priorities are equal, compare their StartTime. // It takes arguments of the type "interface{}" to be used with SortableList, // but expects those arguments to be *v1.Pod. -func MoreImportantPod(pod1, pod2 interface{}) bool { - p1 := GetPodPriority(pod1.(*v1.Pod)) - p2 := GetPodPriority(pod2.(*v1.Pod)) +func MoreImportantPod(pod1, pod2 *v1.Pod) bool { + p1 := podutil.GetPodPriority(pod1) + p2 := podutil.GetPodPriority(pod2) if p1 != p2 { return p1 > p2 } - return GetPodStartTime(pod1.(*v1.Pod)).Before(GetPodStartTime(pod2.(*v1.Pod))) + return GetPodStartTime(pod1).Before(GetPodStartTime(pod2)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/volumebinder/volume_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/volumebinder/volume_binder.go index fdc3b3e32d1..32d14059ce1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/volumebinder/volume_binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/volumebinder/volume_binder.go @@ -19,7 +19,7 @@ package volumebinder import ( "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" @@ -35,13 +35,14 @@ type VolumeBinder struct { func NewVolumeBinder( client clientset.Interface, nodeInformer coreinformers.NodeInformer, + csiNodeInformer storageinformers.CSINodeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, pvInformer coreinformers.PersistentVolumeInformer, storageClassInformer storageinformers.StorageClassInformer, bindTimeout time.Duration) *VolumeBinder { return &VolumeBinder{ - Binder: volumescheduling.NewVolumeBinder(client, nodeInformer, pvcInformer, pvInformer, storageClassInformer, bindTimeout), + Binder: volumescheduling.NewVolumeBinder(client, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, bindTimeout), } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go index 5352f1332e2..2c67ff57861 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go @@ -50,7 +50,7 @@ func isRequired(pod *v1.Pod) bool { return false } -// Returns the name of the profile to use with the container. +// GetProfileName returns the name of the profile to use with the container. 
func GetProfileName(pod *v1.Pod, containerName string) string { return GetProfileNameFromPodAnnotations(pod.Annotations, containerName) } @@ -61,7 +61,7 @@ func GetProfileNameFromPodAnnotations(annotations map[string]string, containerNa return annotations[ContainerAnnotationKeyPrefix+containerName] } -// Sets the name of the profile to use with the container. +// SetProfileName sets the name of the profile to use with the container. func SetProfileName(pod *v1.Pod, containerName, profileName string) error { if pod.Annotations == nil { pod.Annotations = map[string]string{} @@ -70,7 +70,7 @@ func SetProfileName(pod *v1.Pod, containerName, profileName string) error { return nil } -// Sets the name of the profile to use with the container. +// SetProfileNameFromPodAnnotations sets the name of the profile to use with the container. func SetProfileNameFromPodAnnotations(annotations map[string]string, containerName, profileName string) error { if annotations == nil { return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go index bd721759566..1742b4e040d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go @@ -37,12 +37,13 @@ import ( // Set to true if the wrong build tags are set (see validate_disabled.go). var isDisabledBuild bool -// Interface for validating that a pod with an AppArmor profile can be run by a Node. +// Validator is a interface for validating that a pod with an AppArmor profile can be run by a Node. type Validator interface { Validate(pod *v1.Pod) error ValidateHost() error } +// NewValidator is in order to find AppArmor FS func NewValidator(runtime string) Validator { if err := validateHost(runtime); err != nil { return &validator{validateHostErr: err} @@ -102,7 +103,7 @@ func validateHost(runtime string) error { // Check build support. if isDisabledBuild { - return errors.New("Binary not compiled for linux") + return errors.New("binary not compiled for linux") } // Check kernel support. @@ -112,7 +113,7 @@ func validateHost(runtime string) error { // Check runtime support. Currently only Docker is supported. if runtime != kubetypes.DockerContainerRuntime && runtime != kubetypes.RemoteContainerRuntime { - return fmt.Errorf("AppArmor is only enabled for 'docker' and 'remote' runtimes. Found: %q.", runtime) + return fmt.Errorf("AppArmor is only enabled for 'docker' and 'remote' runtimes. Found: %q", runtime) } return nil @@ -134,6 +135,7 @@ func validateProfile(profile string, loadedProfiles map[string]bool) error { return nil } +// ValidateProfileFormat checks the format of the profile. 
func ValidateProfileFormat(profile string) error { if profile == "" || profile == ProfileRuntimeDefault || profile == ProfileNameUnconfined { return nil @@ -198,12 +200,10 @@ func getAppArmorFS() (string, error) { msg := fmt.Sprintf("path %s does not exist", appArmorFS) if err != nil { return "", fmt.Errorf("%s: %v", msg, err) - } else { - return "", errors.New(msg) } - } else { - return appArmorFS, nil + return "", errors.New(msg) } + return appArmorFS, nil } } if err := scanner.Err(); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go index 715156e8a01..0c8d3faf9e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go @@ -27,9 +27,9 @@ import ( const ( // AllowAny is the wildcard used to allow any profile. AllowAny = "*" - // The annotation key specifying the default seccomp profile. + // DefaultProfileAnnotationKey specifies the default seccomp profile. DefaultProfileAnnotationKey = "seccomp.security.alpha.kubernetes.io/defaultProfileName" - // The annotation key specifying the allowed seccomp profiles. + // AllowedProfilesAnnotationKey specifies the allowed seccomp profiles. AllowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go index 6c33276c563..cab5f59f3e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go @@ -46,8 +46,6 @@ type mustMatchPatterns struct { var ( _ SysctlsStrategy = &mustMatchPatterns{} - - defaultSysctlsPatterns = []string{"*"} ) // NewMustMatchPatterns creates a new mustMatchPatterns strategy that will provide validation. 
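[Editor's illustrative note, not part of the patch] The AppArmor helpers touched above store a per-container profile in pod annotations keyed by ContainerAnnotationKeyPrefix + containerName. A short sketch of how a caller would use them, assuming the vendored import path k8s.io/kubernetes/pkg/security/apparmor and the ProfileRuntimeDefault constant from that package; the pod and container names are made up:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/security/apparmor"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "app", Image: "nginx"}},
		},
	}

	// SetProfileName writes the profile under the per-container annotation key,
	// creating the annotations map if it does not exist yet.
	if err := apparmor.SetProfileName(pod, "app", apparmor.ProfileRuntimeDefault); err != nil {
		fmt.Println("set profile:", err)
		return
	}

	// GetProfileName reads the same annotation back.
	profile := apparmor.GetProfileName(pod, "app")
	fmt.Println("profile for app:", profile)

	// ValidateProfileFormat returns nil for "", the runtime default, and unconfined
	// (as shown in the hunk above); anything else must follow the expected format.
	if err := apparmor.ValidateProfileFormat(profile); err != nil {
		fmt.Println("invalid profile:", err)
	}
}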
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go index 57c482f0ba6..4739493ee4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go @@ -106,7 +106,7 @@ func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private klog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token has been invalidated") } - if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 { + if !bytes.Equal(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) { klog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token does not match server's copy") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go index 5d74d53b13c..385b81f5e29 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go @@ -40,6 +40,10 @@ type BoundedFrequencyRunner struct { lastRun time.Time // time of last run timer timer // timer for deferred runs limiter rateLimiter // rate limiter for on-demand runs + + retry chan struct{} // schedule a retry + retryMu sync.Mutex // guards retryTime + retryTime time.Time // when to retry } // designed so that flowcontrol.RateLimiter satisfies @@ -72,6 +76,9 @@ type timer interface { // See time.Now. Now() time.Time + // Remaining returns the time until the timer will go off (if it is running). + Remaining() time.Duration + // See time.Since. Since(t time.Time) time.Duration @@ -81,26 +88,40 @@ type timer interface { // implement our timer in terms of std time.Timer. type realTimer struct { - *time.Timer + timer *time.Timer + next time.Time +} + +func (rt *realTimer) C() <-chan time.Time { + return rt.timer.C } -func (rt realTimer) C() <-chan time.Time { - return rt.Timer.C +func (rt *realTimer) Reset(d time.Duration) bool { + rt.next = time.Now().Add(d) + return rt.timer.Reset(d) } -func (rt realTimer) Now() time.Time { +func (rt *realTimer) Stop() bool { + return rt.timer.Stop() +} + +func (rt *realTimer) Now() time.Time { return time.Now() } -func (rt realTimer) Since(t time.Time) time.Duration { +func (rt *realTimer) Remaining() time.Duration { + return rt.next.Sub(time.Now()) +} + +func (rt *realTimer) Since(t time.Time) time.Duration { return time.Since(t) } -func (rt realTimer) Sleep(d time.Duration) { +func (rt *realTimer) Sleep(d time.Duration) { time.Sleep(d) } -var _ timer = realTimer{} +var _ timer = &realTimer{} // NewBoundedFrequencyRunner creates a new BoundedFrequencyRunner instance, // which will manage runs of the specified function. @@ -132,8 +153,8 @@ var _ timer = realTimer{} // The maxInterval must be greater than or equal to the minInterval, If the // caller passes a maxInterval less than minInterval, this function will panic. 
func NewBoundedFrequencyRunner(name string, fn func(), minInterval, maxInterval time.Duration, burstRuns int) *BoundedFrequencyRunner { - timer := realTimer{Timer: time.NewTimer(0)} // will tick immediately - <-timer.C() // consume the first tick + timer := &realTimer{timer: time.NewTimer(0)} // will tick immediately + <-timer.C() // consume the first tick return construct(name, fn, minInterval, maxInterval, burstRuns, timer) } @@ -152,6 +173,7 @@ func construct(name string, fn func(), minInterval, maxInterval time.Duration, b minInterval: minInterval, maxInterval: maxInterval, run: make(chan struct{}, 1), + retry: make(chan struct{}, 1), timer: timer, } if minInterval == 0 { @@ -179,6 +201,8 @@ func (bfr *BoundedFrequencyRunner) Loop(stop <-chan struct{}) { bfr.tryRun() case <-bfr.run: bfr.tryRun() + case <-bfr.retry: + bfr.doRetry() } } } @@ -199,6 +223,36 @@ func (bfr *BoundedFrequencyRunner) Run() { } } +// RetryAfter ensures that the function will run again after no later than interval. This +// can be called from inside a run of the BoundedFrequencyRunner's function, or +// asynchronously. +func (bfr *BoundedFrequencyRunner) RetryAfter(interval time.Duration) { + // This could be called either with or without bfr.mu held, so we can't grab that + // lock, and therefore we can't update the timer directly. + + // If the Loop thread is currently running fn then it may be a while before it + // processes our retry request. But we want to retry at interval from now, not at + // interval from "whenever doRetry eventually gets called". So we convert to + // absolute time. + retryTime := bfr.timer.Now().Add(interval) + + // We can't just write retryTime to a channel because there could be multiple + // RetryAfter calls before Loop gets a chance to read from the channel. So we + // record the soonest requested retry time in bfr.retryTime and then only signal + // the Loop thread once, just like Run does. + bfr.retryMu.Lock() + defer bfr.retryMu.Unlock() + if !bfr.retryTime.IsZero() && bfr.retryTime.Before(retryTime) { + return + } + bfr.retryTime = retryTime + + select { + case bfr.retry <- struct{}{}: + default: + } +} + // assumes the lock is not held func (bfr *BoundedFrequencyRunner) stop() { bfr.mu.Lock() @@ -207,6 +261,27 @@ func (bfr *BoundedFrequencyRunner) stop() { bfr.timer.Stop() } +// assumes the lock is not held +func (bfr *BoundedFrequencyRunner) doRetry() { + bfr.mu.Lock() + defer bfr.mu.Unlock() + bfr.retryMu.Lock() + defer bfr.retryMu.Unlock() + + if bfr.retryTime.IsZero() { + return + } + + // Timer wants an interval not an absolute time, so convert retryTime back now + retryInterval := bfr.retryTime.Sub(bfr.timer.Now()) + bfr.retryTime = time.Time{} + if retryInterval < bfr.timer.Remaining() { + klog.V(3).Infof("%s: retrying in %v", bfr.name, retryInterval) + bfr.timer.Stop() + bfr.timer.Reset(retryInterval) + } +} + // assumes the lock is not held func (bfr *BoundedFrequencyRunner) tryRun() { bfr.mu.Lock() @@ -223,17 +298,16 @@ func (bfr *BoundedFrequencyRunner) tryRun() { } // It can't run right now, figure out when it can run next. 
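[Editor's illustrative note, not part of the patch] The BoundedFrequencyRunner changes above add a RetryAfter hook on top of the existing Run/Loop API: a sync function can ask to be re-run no later than a given interval, for example after a transient failure. A hedged usage sketch, with example intervals and a made-up syncRules function:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/async"
)

func transientFailure() bool { return false } // stand-in for a real error check

func main() {
	stopCh := make(chan struct{})
	var runner *async.BoundedFrequencyRunner

	syncRules := func() {
		fmt.Println("syncing at", time.Now().Format(time.RFC3339))
		// On a transient failure, ask the runner to try again soon instead of
		// waiting for the next maxInterval tick. Safe to call from inside fn.
		if transientFailure() {
			runner.RetryAfter(10 * time.Second)
		}
	}

	// At most a burst of 2 on-demand runs per minInterval window (15s here),
	// and a periodic run at least every maxInterval (5m here).
	runner = async.NewBoundedFrequencyRunner("example-sync", syncRules, 15*time.Second, 5*time.Minute, 2)
	go runner.Loop(stopCh)

	runner.Run() // on-demand run, throttled by minInterval/burst

	time.Sleep(2 * time.Second)
	close(stopCh)
}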
- - elapsed := bfr.timer.Since(bfr.lastRun) // how long since last run - nextPossible := bfr.minInterval - elapsed // time to next possible run - nextScheduled := bfr.maxInterval - elapsed // time to next periodic run + elapsed := bfr.timer.Since(bfr.lastRun) // how long since last run + nextPossible := bfr.minInterval - elapsed // time to next possible run + nextScheduled := bfr.timer.Remaining() // time to next scheduled run klog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled) + // It's hard to avoid race conditions in the unit tests unless we always reset + // the timer here, even when it's unchanged if nextPossible < nextScheduled { - // Set the timer for ASAP, but don't drain here. Assuming Loop is running, - // it might get a delivery in the mean time, but that is OK. - bfr.timer.Stop() - bfr.timer.Reset(nextPossible) - klog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible) + nextScheduled = nextPossible } + bfr.timer.Stop() + bfr.timer.Reset(nextScheduled) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD index bc590bbfa7c..068367e5edd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD @@ -20,6 +20,11 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -41,6 +46,10 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/configz/configz.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/configz/configz.go index 5bb192637e3..869af648bee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/configz/configz.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/configz/configz.go @@ -118,6 +118,7 @@ func write(w http.ResponseWriter) error { return fmt.Errorf("error marshaling json: %v", err) } w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Content-Type-Options", "nosniff") _, err = w.Write(b) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/dbus.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/dbus.go deleted file mode 100644 index 702d16e5dce..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/dbus.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dbus - -import ( - godbus "github.com/godbus/dbus" -) - -// Interface is an interface that presents a subset of the godbus/dbus API. Use this -// when you want to inject fakeable/mockable D-Bus behavior. -type Interface interface { - // SystemBus returns a connection to the system bus, connecting to it - // first if necessary - SystemBus() (Connection, error) - // SessionBus returns a connection to the session bus, connecting to it - // first if necessary - SessionBus() (Connection, error) -} - -// Connection represents a D-Bus connection -type Connection interface { - // Returns an Object representing the bus itself - BusObject() Object - - // Object creates a representation of a remote D-Bus object - Object(name, path string) Object - - // Signal registers or unregisters a channel to receive D-Bus signals - Signal(ch chan<- *godbus.Signal) -} - -// Object represents a remote D-Bus object -type Object interface { - // Call synchronously calls a D-Bus method - Call(method string, flags godbus.Flags, args ...interface{}) Call -} - -// Call represents a pending or completed D-Bus method call -type Call interface { - // Store returns a completed call's return values, or an error - Store(retvalues ...interface{}) error -} - -// Implements Interface in terms of actually talking to D-Bus -type dbusImpl struct { - systemBus *connImpl - sessionBus *connImpl -} - -// Implements Connection as a godbus.Conn -type connImpl struct { - conn *godbus.Conn -} - -// Implements Object as a godbus.Object -type objectImpl struct { - object godbus.BusObject -} - -// Implements Call as a godbus.Call -type callImpl struct { - call *godbus.Call -} - -// New returns a new Interface which will use godbus to talk to D-Bus -func New() Interface { - return &dbusImpl{} -} - -// SystemBus is part of Interface -func (db *dbusImpl) SystemBus() (Connection, error) { - if db.systemBus == nil { - bus, err := godbus.SystemBus() - if err != nil { - return nil, err - } - db.systemBus = &connImpl{bus} - } - - return db.systemBus, nil -} - -// SessionBus is part of Interface -func (db *dbusImpl) SessionBus() (Connection, error) { - if db.sessionBus == nil { - bus, err := godbus.SessionBus() - if err != nil { - return nil, err - } - db.sessionBus = &connImpl{bus} - } - - return db.sessionBus, nil -} - -// BusObject is part of the Connection interface -func (conn *connImpl) BusObject() Object { - return &objectImpl{conn.conn.BusObject()} -} - -// Object is part of the Connection interface -func (conn *connImpl) Object(name, path string) Object { - return &objectImpl{conn.conn.Object(name, godbus.ObjectPath(path))} -} - -// Signal is part of the Connection interface -func (conn *connImpl) Signal(ch chan<- *godbus.Signal) { - conn.conn.Signal(ch) -} - -// Call is part of the Object interface -func (obj *objectImpl) Call(method string, flags godbus.Flags, args ...interface{}) Call { - return &callImpl{obj.object.Call(method, flags, args...)} -} - -// Store is part of the Call interface -func (call *callImpl) Store(retvalues ...interface{}) error { - return call.call.Store(retvalues...) 
-} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go deleted file mode 100644 index cceec4ca772..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dbus - -import ( - "fmt" - "sync" - - godbus "github.com/godbus/dbus" -) - -// Fake is a simple fake Interface type. -type Fake struct { - systemBus *FakeConnection - sessionBus *FakeConnection -} - -// FakeConnection represents a fake D-Bus connection -type FakeConnection struct { - lock sync.Mutex - busObject *fakeObject - objects map[string]*fakeObject - signalHandlers []chan<- *godbus.Signal -} - -// FakeHandler is used to handle fake D-Bus method calls -type FakeHandler func(method string, args ...interface{}) ([]interface{}, error) - -type fakeObject struct { - handler FakeHandler -} - -type fakeCall struct { - ret []interface{} - err error -} - -// NewFake returns a new Interface which will fake talking to D-Bus -func NewFake(systemBus *FakeConnection, sessionBus *FakeConnection) *Fake { - return &Fake{systemBus, sessionBus} -} - -// NewFakeConnection returns a FakeConnection Interface -func NewFakeConnection() *FakeConnection { - return &FakeConnection{ - objects: make(map[string]*fakeObject), - } -} - -// SystemBus is part of Interface -func (db *Fake) SystemBus() (Connection, error) { - if db.systemBus != nil { - return db.systemBus, nil - } - return nil, fmt.Errorf("DBus is not running") -} - -// SessionBus is part of Interface -func (db *Fake) SessionBus() (Connection, error) { - if db.sessionBus != nil { - return db.sessionBus, nil - } - return nil, fmt.Errorf("DBus is not running") -} - -// BusObject is part of the Connection interface -func (conn *FakeConnection) BusObject() Object { - return conn.busObject -} - -// Object is part of the Connection interface -func (conn *FakeConnection) Object(name, path string) Object { - return conn.objects[name+path] -} - -// Signal is part of the Connection interface -func (conn *FakeConnection) Signal(ch chan<- *godbus.Signal) { - conn.lock.Lock() - defer conn.lock.Unlock() - for i := range conn.signalHandlers { - if conn.signalHandlers[i] == ch { - conn.signalHandlers = append(conn.signalHandlers[:i], conn.signalHandlers[i+1:]...) 
- return - } - } - conn.signalHandlers = append(conn.signalHandlers, ch) -} - -// SetBusObject sets the handler for the BusObject of conn -func (conn *FakeConnection) SetBusObject(handler FakeHandler) { - conn.busObject = &fakeObject{handler} -} - -// AddObject adds a handler for the Object at name and path -func (conn *FakeConnection) AddObject(name, path string, handler FakeHandler) { - conn.objects[name+path] = &fakeObject{handler} -} - -// EmitSignal emits a signal on conn -func (conn *FakeConnection) EmitSignal(name, path, iface, signal string, args ...interface{}) { - conn.lock.Lock() - defer conn.lock.Unlock() - sig := &godbus.Signal{ - Sender: name, - Path: godbus.ObjectPath(path), - Name: iface + "." + signal, - Body: args, - } - for _, ch := range conn.signalHandlers { - ch <- sig - } -} - -// Call is part of the Object interface -func (obj *fakeObject) Call(method string, flags godbus.Flags, args ...interface{}) Call { - ret, err := obj.handler(method, args...) - return &fakeCall{ret, err} -} - -// Store is part of the Call interface -func (call *fakeCall) Store(retvalues ...interface{}) error { - if call.err != nil { - return call.err - } - return godbus.Store(call.ret, retvalues...) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ebtables/ebtables.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ebtables/ebtables.go index 4a52df0fc83..ae84b5d0a16 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ebtables/ebtables.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ebtables/ebtables.go @@ -110,7 +110,7 @@ func getEbtablesVersionString(exec utilexec.Interface) (string, error) { if err != nil { return "", err } - versionMatcher := regexp.MustCompile("v([0-9]+\\.[0-9]+\\.[0-9]+)") + versionMatcher := regexp.MustCompile(`v([0-9]+\.[0-9]+\.[0-9]+)`) match := versionMatcher.FindStringSubmatch(string(bytes)) if match == nil { return "", fmt.Errorf("no ebtables version found in string: %s", bytes) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/env/env.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/env/env.go index dee544f7c66..6879c5052b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/env/env.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/env/env.go @@ -21,6 +21,8 @@ import ( "strconv" ) +// GetEnvAsStringOrFallback returns the env variable for the given key +// and falls back to the given defaultValue if not set func GetEnvAsStringOrFallback(key, defaultValue string) string { if v := os.Getenv(key); v != "" { return v @@ -28,6 +30,8 @@ func GetEnvAsStringOrFallback(key, defaultValue string) string { return defaultValue } +// GetEnvAsIntOrFallback returns the env variable (parsed as integer) for +// the given key and falls back to the given defaultValue if not set func GetEnvAsIntOrFallback(key string, defaultValue int) (int, error) { if v := os.Getenv(key); v != "" { value, err := strconv.Atoi(v) @@ -39,6 +43,8 @@ func GetEnvAsIntOrFallback(key string, defaultValue int) (int, error) { return defaultValue, nil } +// GetEnvAsFloat64OrFallback returns the env variable (parsed as float64) for +// the given key and falls back to the given defaultValue if not set func GetEnvAsFloat64OrFallback(key string, defaultValue float64) (float64, error) { if v := os.Getenv(key); v != "" { value, err := strconv.ParseFloat(v, 64) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD index 
a50da6ee217..970f6885add 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD @@ -13,6 +13,9 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/flock", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], @@ -22,6 +25,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD index a0d32ac7f78..81d3ce9bd41 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD @@ -17,17 +17,19 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/iptables", deps = [ - "//pkg/util/dbus:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", - "//vendor/github.com/godbus/dbus:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/trace:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], @@ -38,14 +40,22 @@ go_test( name = "go_default_test", srcs = [ "iptables_test.go", + "monitor_test.go", "save_restore_test.go", ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/dbus:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS index fc29299e675..ceb274ac52e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS @@ -9,5 +9,6 @@ approvers: - dcbw - thockin - eparis + - danwinship labels: - 
sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index a874bb8a58c..50dce1543c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -25,11 +25,10 @@ import ( "sync" "time" - godbus "github.com/godbus/dbus" "k8s.io/apimachinery/pkg/util/sets" utilversion "k8s.io/apimachinery/pkg/util/version" + utilwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" - utildbus "k8s.io/kubernetes/pkg/util/dbus" utilexec "k8s.io/utils/exec" utiltrace "k8s.io/utils/trace" ) @@ -65,10 +64,17 @@ type Interface interface { Restore(table Table, data []byte, flush FlushFlag, counters RestoreCountersFlag) error // RestoreAll is the same as Restore except that no table is specified. RestoreAll(data []byte, flush FlushFlag, counters RestoreCountersFlag) error - // AddReloadFunc adds a function to call on iptables reload - AddReloadFunc(reloadFunc func()) - // Destroy cleans up resources used by the Interface - Destroy() + // Monitor detects when the given iptables tables have been flushed by an external + // tool (e.g. a firewall reload) by creating canary chains and polling to see if + // they have been deleted. (Specifically, it polls tables[0] every interval until + // the canary has been deleted from there, then waits a short additional time for + // the canaries to be deleted from the remaining tables as well. You can optimize + // the polling by listing a relatively empty table in tables[0]). When a flush is + // detected, this calls the reloadFunc so the caller can reload their own iptables + // rules. If it is unable to create the canary chains (either initially or after + // a reload) it will log an error and stop monitoring. + // (This function should be called from a goroutine.) + Monitor(canary Chain, tables []Table, reloadFunc func(), interval time.Duration, stopCh <-chan struct{}) // HasRandomFully reveals whether `-j MASQUERADE` takes the // `--random-fully` option. 
This is helpful to work around a // Linux kernel bug that sometimes causes multiple flows to get @@ -143,22 +149,17 @@ const LockfilePath16x = "/run/xtables.lock" type runner struct { mu sync.Mutex exec utilexec.Interface - dbus utildbus.Interface protocol Protocol hasCheck bool - hasListener bool hasRandomFully bool waitFlag []string restoreWaitFlag []string lockfilePath string - - reloadFuncs []func() - signal chan *godbus.Signal } // newInternal returns a new Interface which will exec iptables, and allows the // caller to change the iptables-restore lockfile path -func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol, lockfilePath string) Interface { +func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath string) Interface { version, err := getIPTablesVersion(exec, protocol) if err != nil { klog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) @@ -171,57 +172,19 @@ func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Prot runner := &runner{ exec: exec, - dbus: dbus, protocol: protocol, hasCheck: version.AtLeast(MinCheckVersion), - hasListener: false, hasRandomFully: version.AtLeast(RandomFullyMinVersion), waitFlag: getIPTablesWaitFlag(version), - restoreWaitFlag: getIPTablesRestoreWaitFlag(version), + restoreWaitFlag: getIPTablesRestoreWaitFlag(version, exec, protocol), lockfilePath: lockfilePath, } return runner } // New returns a new Interface which will exec iptables. -func New(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol) Interface { - return newInternal(exec, dbus, protocol, "") -} - -// Destroy is part of Interface. -func (runner *runner) Destroy() { - if runner.signal != nil { - runner.signal <- nil - } -} - -const ( - firewalldName = "org.fedoraproject.FirewallD1" - firewalldPath = "/org/fedoraproject/FirewallD1" - firewalldInterface = "org.fedoraproject.FirewallD1" -) - -// Connects to D-Bus and listens for FirewallD start/restart. (On non-FirewallD-using -// systems, this is effectively a no-op; we listen for the signals, but they will never be -// emitted, so reload() will never be called.) -func (runner *runner) connectToFirewallD() { - bus, err := runner.dbus.SystemBus() - if err != nil { - klog.V(1).Infof("Could not connect to D-Bus system bus: %s", err) - return - } - runner.hasListener = true - - rule := fmt.Sprintf("type='signal',sender='%s',path='%s',interface='%s',member='Reloaded'", firewalldName, firewalldPath, firewalldInterface) - bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) - - rule = fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'", firewalldName) - bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) - - runner.signal = make(chan *godbus.Signal, 10) - bus.Signal(runner.signal) - - go runner.dbusSignalHandler(bus) +func New(exec utilexec.Interface, protocol Protocol) Interface { + return newInternal(exec, protocol, "") } // EnsureChain is part of Interface. @@ -434,7 +397,7 @@ func (runner *runner) runContext(ctx context.Context, op operation, args []strin iptablesCmd := iptablesCommand(runner.protocol) fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) 
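[Editor's illustrative note, not part of the patch] The Interface changes above drop the D-Bus/firewalld reload hook (AddReloadFunc/Destroy) in favour of the canary-based Monitor method. A sketch of how a caller would start it, assuming the constant and type names (ProtocolIpv4, Table, Chain) as they appear in this vendored revision; the chain name, tables, interval, and reload body are example values:

package main

import (
	"time"

	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	utilexec "k8s.io/utils/exec"
)

func main() {
	ipt := utiliptables.New(utilexec.New(), utiliptables.ProtocolIpv4)

	reload := func() {
		// Re-install our rules after an external flush (e.g. a firewalld restart).
	}

	stopCh := make(chan struct{})

	// Monitor creates the canary chain in each listed table and polls the first
	// table until the canary disappears, then calls reload. The doc comment above
	// suggests listing a relatively empty table first. Run it in its own goroutine.
	go ipt.Monitor(
		utiliptables.Chain("EXAMPLE-CANARY"),
		[]utiliptables.Table{utiliptables.TableNAT, utiliptables.TableFilter},
		reload,
		2*time.Second,
		stopCh,
	)

	time.Sleep(10 * time.Second)
	close(stopCh) // Monitor deletes its canary chains and returns
}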
- klog.V(5).Infof("running iptables %s %v", string(op), args) + klog.V(5).Infof("running iptables: %s %v", iptablesCmd, fullArgs) if ctx == nil { return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput() } @@ -529,12 +492,84 @@ func (runner *runner) checkRuleUsingCheck(args []string) (bool, error) { return false, fmt.Errorf("error checking rule: %v: %s", err, out) } +const ( + // Max time we wait for an iptables flush to complete after we notice it has started + iptablesFlushTimeout = 5 * time.Second + // How often we poll while waiting for an iptables flush to complete + iptablesFlushPollTime = 100 * time.Millisecond +) + +// Monitor is part of Interface +func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), interval time.Duration, stopCh <-chan struct{}) { + for { + _ = utilwait.PollImmediateUntil(interval, func() (bool, error) { + for _, table := range tables { + if _, err := runner.EnsureChain(table, canary); err != nil { + klog.Warningf("Could not set up iptables canary %s/%s: %v", string(table), string(canary), err) + return false, nil + } + } + return true, nil + }, stopCh) + + // Poll until stopCh is closed or iptables is flushed + err := utilwait.PollUntil(interval, func() (bool, error) { + if exists, err := runner.chainExists(tables[0], canary); exists { + return false, nil + } else if isResourceError(err) { + klog.Warningf("Could not check for iptables canary %s/%s: %v", string(tables[0]), string(canary), err) + return false, nil + } + klog.V(2).Infof("iptables canary %s/%s deleted", string(tables[0]), string(canary)) + + // Wait for the other canaries to be deleted too before returning + // so we don't start reloading too soon. + err := utilwait.PollImmediate(iptablesFlushPollTime, iptablesFlushTimeout, func() (bool, error) { + for i := 1; i < len(tables); i++ { + if exists, err := runner.chainExists(tables[i], canary); exists || isResourceError(err) { + return false, nil + } + } + return true, nil + }) + if err != nil { + klog.Warning("Inconsistent iptables state detected.") + } + return true, nil + }, stopCh) + + if err != nil { + // stopCh was closed + for _, table := range tables { + _ = runner.DeleteChain(table, canary) + } + return + } + + klog.V(2).Infof("Reloading after iptables flush") + reloadFunc() + } +} + +// chainExists is used internally by Monitor; none of the public Interface methods can be +// used to distinguish "chain exists" from "chain does not exist" with no side effects +func (runner *runner) chainExists(table Table, chain Chain) (bool, error) { + fullArgs := makeFullArgs(table, chain) + + runner.mu.Lock() + defer runner.mu.Unlock() + + _, err := runner.run(opListChain, fullArgs) + return err == nil, err +} + type operation string const ( opCreateChain operation = "-N" opFlushChain operation = "-F" opDeleteChain operation = "-X" + opListChain operation = "-L" opAppendRule operation = "-A" opCheckRule operation = "-C" opDeleteRule operation = "-D" @@ -544,6 +579,8 @@ func makeFullArgs(table Table, chain Chain, args ...string) []string { return append([]string{string(chain), "-t", string(table)}, args...) 
} +const iptablesVersionPattern = `v([0-9]+(\.[0-9]+)+)` + // getIPTablesVersion runs "iptables --version" and parses the returned version func getIPTablesVersion(exec utilexec.Interface, protocol Protocol) (*utilversion.Version, error) { // this doesn't access mutable state so we don't need to use the interface / runner @@ -552,7 +589,7 @@ func getIPTablesVersion(exec utilexec.Interface, protocol Protocol) (*utilversio if err != nil { return nil, err } - versionMatcher := regexp.MustCompile("v([0-9]+(\\.[0-9]+)+)") + versionMatcher := regexp.MustCompile(iptablesVersionPattern) match := versionMatcher.FindStringSubmatch(string(bytes)) if match == nil { return nil, fmt.Errorf("no iptables version found in string: %s", bytes) @@ -578,68 +615,46 @@ func getIPTablesWaitFlag(version *utilversion.Version) []string { } // Checks if iptables-restore has a "wait" flag -func getIPTablesRestoreWaitFlag(version *utilversion.Version) []string { +func getIPTablesRestoreWaitFlag(version *utilversion.Version, exec utilexec.Interface, protocol Protocol) []string { if version.AtLeast(WaitRestoreMinVersion) { return []string{WaitString, WaitSecondsValue} - } else { - return nil } -} - -// goroutine to listen for D-Bus signals -func (runner *runner) dbusSignalHandler(bus utildbus.Connection) { - firewalld := bus.Object(firewalldName, firewalldPath) - for s := range runner.signal { - if s == nil { - // Unregister - bus.Signal(runner.signal) - return - } - - switch s.Name { - case "org.freedesktop.DBus.NameOwnerChanged": - name := s.Body[0].(string) - newOwner := s.Body[2].(string) - - if name != firewalldName || len(newOwner) == 0 { - continue - } - - // FirewallD startup (specifically the part where it deletes - // all existing iptables rules) may not yet be complete when - // we get this signal, so make a dummy request to it to - // synchronize. - firewalld.Call(firewalldInterface+".getDefaultZone", 0) - - runner.reload() - case firewalldInterface + ".Reloaded": - runner.reload() - } + // Older versions may have backported features; if iptables-restore supports + // --version, assume it also supports --wait + vstring, err := getIPTablesRestoreVersionString(exec, protocol) + if err != nil || vstring == "" { + klog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") + return nil } -} - -// AddReloadFunc is part of Interface -func (runner *runner) AddReloadFunc(reloadFunc func()) { - runner.mu.Lock() - defer runner.mu.Unlock() - - // We only need to listen to firewalld if there are Reload functions, so lazy - // initialize the listener. - if !runner.hasListener { - runner.connectToFirewallD() + if _, err := utilversion.ParseGeneric(vstring); err != nil { + klog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait") + return nil } - - runner.reloadFuncs = append(runner.reloadFuncs, reloadFunc) + return []string{WaitString} } -// runs all reload funcs to re-sync iptables rules -func (runner *runner) reload() { - klog.V(1).Infof("reloading iptables rules") +// getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string +// in the form "X.X.X" +func getIPTablesRestoreVersionString(exec utilexec.Interface, protocol Protocol) (string, error) { + // this doesn't access mutable state so we don't need to use the interface / runner - for _, f := range runner.reloadFuncs { - f() + // iptables-restore hasn't always had --version, and worse complains + // about unrecognized commands but doesn't exit when it gets them. 
+ // Work around that by setting stdin to nothing so it exits immediately. + iptablesRestoreCmd := iptablesRestoreCommand(protocol) + cmd := exec.Command(iptablesRestoreCmd, "--version") + cmd.SetStdin(bytes.NewReader([]byte{})) + bytes, err := cmd.CombinedOutput() + if err != nil { + return "", err } + versionMatcher := regexp.MustCompile(iptablesVersionPattern) + match := versionMatcher.FindStringSubmatch(string(bytes)) + if match == nil { + return "", fmt.Errorf("no iptables version found in string: %s", bytes) + } + return match[1], nil } func (runner *runner) HasRandomFully() bool { @@ -682,3 +697,16 @@ func IsNotFoundError(err error) bool { } return false } + +const iptablesStatusResourceProblem = 4 + +// isResourceError returns true if the error indicates that iptables ran into a "resource +// problem" and was unable to attempt the request. In particular, this will be true if it +// times out trying to get the iptables lock. +func isResourceError(err error) bool { + if ee, isExitError := err.(utilexec.ExitError); isExitError { + return ee.ExitStatus() == iptablesStatusResourceProblem + } else { + return false + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD index 70a51121552..c4ac2cf9249 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD @@ -16,6 +16,9 @@ go_test( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", ], @@ -36,6 +39,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ @@ -47,6 +52,9 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//vendor/k8s.io/utils/exec:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD deleted file mode 100644 index 813b14879f5..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD +++ /dev/null @@ -1,95 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "exec.go", - "fake.go", - "mount.go", - "mount_helper_common.go", - "mount_helper_unix.go", - "mount_helper_windows.go", - "mount_linux.go", - "mount_unsupported.go", - "mount_windows.go", - ], - importpath = "k8s.io/kubernetes/pkg/util/mount", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - 
"//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/k8s.io/utils/io:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/k8s.io/utils/keymutex:go_default_library", - "//vendor/k8s.io/utils/path:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "mount_helper_test.go", - "mount_helper_unix_test.go", - "mount_helper_windows_test.go", - "mount_linux_test.go", - "mount_test.go", - "mount_windows_test.go", - "safe_format_and_mount_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//vendor/k8s.io/utils/exec/testing:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec.go deleted file mode 100644 index 30502b7028e..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mount - -import "k8s.io/utils/exec" - -// NewOSExec returns a new Exec interface implementation based on exec() -func NewOSExec() Exec { - return &osExec{} -} - -// Real implementation of Exec interface that uses simple utils.Exec -type osExec struct{} - -var _ Exec = &osExec{} - -func (e *osExec) Run(cmd string, args ...string) ([]byte, error) { - exe := exec.New() - return exe.Command(cmd, args...).CombinedOutput() -} - -// NewFakeExec returns a new FakeExec -func NewFakeExec(run runHook) *FakeExec { - return &FakeExec{runHook: run} -} - -// FakeExec for testing. 
-type FakeExec struct { - runHook runHook -} -type runHook func(cmd string, args ...string) ([]byte, error) - -// Run executes the command using the optional runhook, if given -func (f *FakeExec) Run(cmd string, args ...string) ([]byte, error) { - if f.runHook != nil { - return f.runHook(cmd, args...) - } - return nil, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go index 3477dce4257..9984380db81 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go @@ -26,7 +26,7 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -139,33 +139,62 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP { // GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone; // it returns empty-string for no zone. +// Since there are currently two separate zone keys: +// * "failure-domain.beta.kubernetes.io/zone" +// * "topology.kubernetes.io/zone" +// GetZoneKey will first check failure-domain.beta.kubernetes.io/zone and if not exists, will then check +// topology.kubernetes.io/zone func GetZoneKey(node *v1.Node) string { labels := node.Labels if labels == nil { return "" } - region, _ := labels[v1.LabelZoneRegion] - failureDomain, _ := labels[v1.LabelZoneFailureDomain] + // TODO: prefer stable labels for zone in v1.18 + zone, ok := labels[v1.LabelZoneFailureDomain] + if !ok { + zone, _ = labels[v1.LabelZoneFailureDomainStable] + } + + // TODO: prefer stable labels for region in v1.18 + region, ok := labels[v1.LabelZoneRegion] + if !ok { + region, _ = labels[v1.LabelZoneRegionStable] + } - if region == "" && failureDomain == "" { + if region == "" && zone == "" { return "" } // We include the null character just in case region or failureDomain has a colon // (We do assume there's no null characters in a region or failureDomain) // As a nice side-benefit, the null character is not printed by fmt.Print or glog - return region + ":\x00:" + failureDomain + return region + ":\x00:" + zone +} + +type nodeForConditionPatch struct { + Status nodeStatusForPatch `json:"status"` +} + +type nodeStatusForPatch struct { + Conditions []v1.NodeCondition `json:"conditions"` } // SetNodeCondition updates specific node condition with patch operation. 
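
As a quick illustration of the GetZoneKey fallback introduced above (a sketch with hypothetical label values, not part of the patch): when only the stable topology labels are present, the beta failure-domain lookups miss and the stable labels are used instead, while the key format itself is unchanged.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	// Node labelled only with the stable topology labels (hypothetical values).
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				"topology.kubernetes.io/region": "us-east-1",
				"topology.kubernetes.io/zone":   "us-east-1a",
			},
		},
	}
	// The beta failure-domain labels are absent, so GetZoneKey falls back to
	// the stable labels; callers still get the familiar region:\x00:zone key.
	fmt.Printf("%q\n", nodeutil.GetZoneKey(node)) // "us-east-1:\x00:us-east-1a"
}
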
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { generatePatch := func(condition v1.NodeCondition) ([]byte, error) { - raw, err := json.Marshal(&[]v1.NodeCondition{condition}) + patch := nodeForConditionPatch{ + Status: nodeStatusForPatch{ + Conditions: []v1.NodeCondition{ + condition, + }, + }, + } + patchBytes, err := json.Marshal(&patch) if err != nil { return nil, err } - return []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw)), nil + return patchBytes, nil } condition.LastHeartbeatTime = metav1.NewTime(time.Now()) patch, err := generatePatch(condition) @@ -176,15 +205,27 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N return err } +type nodeForCIDRMergePatch struct { + Spec nodeSpecForMergePatch `json:"spec"` +} + +type nodeSpecForMergePatch struct { + PodCIDR string `json:"podCIDR"` + PodCIDRs []string `json:"podCIDRs,omitempty"` +} + // PatchNodeCIDR patches the specified node's CIDR to the given value. func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error { - raw, err := json.Marshal(cidr) + patch := nodeForCIDRMergePatch{ + Spec: nodeSpecForMergePatch{ + PodCIDR: cidr, + }, + } + patchBytes, err := json.Marshal(&patch) if err != nil { return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } - patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s}}`, raw)) - if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } @@ -193,18 +234,18 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro // PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value. 
func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error { - rawCidrs, err := json.Marshal(cidrs) - if err != nil { - return fmt.Errorf("failed to json.Marshal CIDRs: %v", err) + // set the pod cidrs list and set the old pod cidr field + patch := nodeForCIDRMergePatch{ + Spec: nodeSpecForMergePatch{ + PodCIDR: cidrs[0], + PodCIDRs: cidrs, + }, } - rawCidr, err := json.Marshal(cidrs[0]) + patchBytes, err := json.Marshal(&patch) if err != nil { return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } - - // set the pod cidrs list and set the old pod cidr field - patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s , "podCIDRs":%s}}`, rawCidr, rawCidrs)) klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD index a4f55ea5c04..36f7a8a57c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD @@ -17,6 +17,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/oom", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/cm/util:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm/util:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -30,6 +34,10 @@ go_test( srcs = ["oom_linux_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go index 81d4304fa2b..079a9b63c9a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go @@ -28,7 +28,7 @@ import ( // PatchPodStatus patches pod status. 
func PatchPodStatus(c clientset.Interface, namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, error) { - patchBytes, err := preparePatchBytesforPodStatus(namespace, name, oldPodStatus, newPodStatus) + patchBytes, err := preparePatchBytesForPodStatus(namespace, name, oldPodStatus, newPodStatus) if err != nil { return nil, nil, err } @@ -40,7 +40,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, oldPodStatus, return updatedPod, patchBytes, nil } -func preparePatchBytesforPodStatus(namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, error) { +func preparePatchBytesForPodStatus(namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, error) { oldData, err := json.Marshal(v1.Pod{ Status: oldPodStatus, }) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD index 223f2265fb6..d612a9a1251 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD @@ -17,6 +17,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/procfs", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -33,6 +37,9 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD index 9cdce022b72..7b1fe6cc87b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD @@ -11,8 +11,8 @@ go_test( srcs = ["removeall_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -20,7 +20,7 @@ go_library( name = "go_default_library", srcs = ["removeall.go"], importpath = "k8s.io/kubernetes/pkg/util/removeall", - deps = ["//pkg/util/mount:go_default_library"], + deps = ["//vendor/k8s.io/utils/mount:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go index fa15ac1bdd3..66717ee29d0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go @@ -22,7 +22,7 @@ import ( "os" "syscall" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // RemoveAllOneFilesystem removes path and any children it contains. 
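
For readers skimming the node.go hunks above: the patch swaps hand-assembled JSON strings for small typed structs that are marshalled into the merge-patch body, which avoids quoting and escaping mistakes. Below is a minimal, self-contained sketch of the resulting payload; the struct definitions mirror the unexported types added by the patch, and the CIDR values are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the unexported nodeSpecForMergePatch/nodeForCIDRMergePatch types
// introduced in pkg/util/node/node.go by this patch.
type nodeSpecForMergePatch struct {
	PodCIDR  string   `json:"podCIDR"`
	PodCIDRs []string `json:"podCIDRs,omitempty"`
}

type nodeForCIDRMergePatch struct {
	Spec nodeSpecForMergePatch `json:"spec"`
}

func main() {
	patch := nodeForCIDRMergePatch{
		Spec: nodeSpecForMergePatch{
			PodCIDR:  "10.244.0.0/24",            // hypothetical CIDR
			PodCIDRs: []string{"10.244.0.0/24"},  // dual-stack list, single entry here
		},
	}
	patchBytes, err := json.Marshal(&patch)
	if err != nil {
		panic(err)
	}
	// Prints: {"spec":{"podCIDR":"10.244.0.0/24","podCIDRs":["10.244.0.0/24"]}}
	fmt.Println(string(patchBytes))
}
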
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD index 04f11e1fc0c..b4144f1255e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD @@ -10,38 +10,42 @@ go_library( visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:android": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:plan9": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go index 4eabdb1ddc0..5ec8a387766 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go @@ -22,7 +22,7 @@ import ( "fmt" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // ResizeFs Provides support for resizing file systems @@ -61,7 +61,7 @@ func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (boo } func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { - output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) + output, err := resizefs.mounter.Exec.Command("resize2fs", devicePath).CombinedOutput() if err == nil { klog.V(2).Infof("Device %s resized successfully", devicePath) return true, nil @@ -74,7 +74,7 @@ func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { func (resizefs *ResizeFs) xfsResize(deviceMountPath string) (bool, error) { args := []string{"-d", deviceMountPath} - output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) 
+ output, err := resizefs.mounter.Exec.Command("xfs_growfs", args...).CombinedOutput() if err == nil { klog.V(2).Infof("Device %s resized successfully", deviceMountPath) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go index dd4dd017e82..cde1d4ca082 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go @@ -21,7 +21,7 @@ package resizefs import ( "fmt" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // ResizeFs Provides support for resizing file systems diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD index f8fbfa05457..a151bcea398 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD @@ -13,6 +13,9 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/rlimit", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD index 868c77c7091..f77447b4d32 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD @@ -15,6 +15,9 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/selinux", deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/.gitattributes deleted file mode 100644 index 7e349eff60b..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -base.go export-subst diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/BUILD deleted file mode 100644 index 26b0a161238..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "doc.go", - "version.go", - ], - importpath = "k8s.io/kubernetes/pkg/version", - deps = ["//staging/src/k8s.io/apimachinery/pkg/version:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/version/prometheus:all-srcs", - "//pkg/version/verflag:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/base.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/base.go deleted file mode 100644 index 
730e79f03d1..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/base.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -// Base version information. -// -// This is the fallback data used when version information from git is not -// provided via go ldflags. It provides an approximation of the Kubernetes -// version for ad-hoc builds (e.g. `go build`) that cannot get the version -// information from git. -// -// If you are looking at these fields in the git tree, they look -// strange. They are modified on the fly by the build process. The -// in-tree values are dummy values used for "git archive", which also -// works for GitHub tar downloads. -// -// When releasing a new Kubernetes version, this file is updated by -// build/mark_new_version.sh to reflect the new version, and then a -// git annotated tag (using format vX.Y where X == Major version and Y -// == Minor version) is created to point to the commit that updates -// pkg/version/base.go -var ( - // TODO: Deprecate gitMajor and gitMinor, use only gitVersion - // instead. First step in deprecation, keep the fields but make - // them irrelevant. (Next we'll take it out, which may muck with - // scripts consuming the kubectl version output - but most of - // these should be looking at gitVersion already anyways.) - gitMajor string // major version, always numeric - gitMinor string // minor version, numeric possibly followed by "+" - - // semantic version, derived by build scripts (see - // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md - // for a detailed discussion of this field) - // - // TODO: This field is still called "gitVersion" for legacy - // reasons. For prerelease versions, the build metadata on the - // semantic version is a git hash, but the version itself is no - // longer the direct output of "git describe", but a slight - // translation to be semver compliant. - - // NOTE: The $Format strings are replaced during 'git archive' thanks to the - // companion .gitattributes file containing 'export-subst' in this same - // directory. See also https://git-scm.com/docs/gitattributes - gitVersion = "v0.0.0-master+$Format:%h$" - gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState = "" // state of git tree, either "clean" or "dirty" - - buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/def.bzl b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/def.bzl deleted file mode 100644 index 302893b1b88..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/def.bzl +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. -def version_x_defs(): - # This should match the list of packages in kube::version::ldflag - stamp_pkgs = [ - "k8s.io/kubernetes/pkg/version", - "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", - ] - - # This should match the list of vars in kube::version::ldflags - # It should also match the list of vars set in hack/print-workspace-status.sh. - stamp_vars = [ - "buildDate", - "gitCommit", - "gitMajor", - "gitMinor", - "gitTreeState", - "gitVersion", - ] - - # Generate the cross-product. - x_defs = {} - for pkg in stamp_pkgs: - for var in stamp_vars: - x_defs["%s.%s" % (pkg, var)] = "{%s}" % var - return x_defs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/version.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/version.go deleted file mode 100644 index 8c8350d13b7..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/version.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -import ( - "fmt" - "runtime" - - apimachineryversion "k8s.io/apimachinery/pkg/version" -) - -// Get returns the overall codebase version. It's for detecting -// what code a binary was built from. 
-func Get() apimachineryversion.Info { - // These variables typically come from -ldflags settings and in - // their absence fallback to the settings in pkg/version/base.go - return apimachineryversion.Info{ - Major: gitMajor, - Minor: gitMinor, - GitVersion: gitVersion, - GitCommit: gitCommit, - GitTreeState: gitTreeState, - BuildDate: buildDate, - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD index edef4b8bee4..8e608ecc467 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD @@ -19,7 +19,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume/util/fs:go_default_library", "//pkg/volume/util/hostutil:go_default_library", "//pkg/volume/util/recyclerclient:go_default_library", @@ -34,11 +33,14 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -58,6 +60,9 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], @@ -83,6 +88,7 @@ filegroup( "//pkg/volume/cinder:all-srcs", "//pkg/volume/configmap:all-srcs", "//pkg/volume/csi:all-srcs", + "//pkg/volume/csimigration:all-srcs", "//pkg/volume/downwardapi:all-srcs", "//pkg/volume/emptydir:all-srcs", "//pkg/volume/fc:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD index 5e21539885d..0a64e9943ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD @@ -21,7 +21,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/awsebs", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -35,6 +34,7 @@ go_library( "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -48,7 +48,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", 
"//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -61,6 +60,7 @@ go_test( "//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go index 365ebb3e082..35bf55df8fe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go @@ -28,10 +28,10 @@ import ( "time" "k8s.io/klog" + "k8s.io/utils/mount" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/aws" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_linux.go index 562a567ca34..43c5182d343 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_linux.go @@ -1,3 +1,4 @@ +// +build !providerless // +build linux /* diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_unsupported.go index efbd78acf0c..e5874ad71b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_unsupported.go @@ -1,3 +1,4 @@ +// +build !providerless // +build !linux,!windows /* diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_windows.go index 6ad160410c2..7d00e07a915 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher_windows.go @@ -1,3 +1,4 @@ +// +build !providerless // +build windows /* @@ -41,7 +42,7 @@ func (attacher *awsElasticBlockStoreAttacher) getDiskNumber(volumeID string) (st volumeID = split[len(split)-1] exec := attacher.host.GetExec(awsElasticBlockStorePluginName) - output, err := exec.Run(ebsnvmeID) + output, err := exec.Command(ebsnvmeID).CombinedOutput() if err != nil { return "", fmt.Errorf("error calling ebsnvme-id.exe: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go index 5fc5c095712..7bae0358e34 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go @@ -29,6 +29,7 @@ import ( "strings" "k8s.io/klog" + "k8s.io/utils/mount" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -36,7 +37,6 @@ import ( "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" 
"k8s.io/legacy-cloud-providers/aws" @@ -89,11 +89,6 @@ func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil) } -func (plugin *awsElasticBlockStorePlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && - utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) -} - func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool { return false } @@ -263,41 +258,13 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath if err != nil { return nil, err } - // This is a workaround to fix the issue in converting aws volume id from globalPDPath - // There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are: - // aws:///vol-1234 (aws/vol-1234) - // aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234) - // vol-1234 (vol-1234) - // This code is for converting volume id to aws style volume id for the first two cases. - sourceName := volumeID - if strings.HasPrefix(volumeID, "aws/") { - names := strings.Split(volumeID, "/") - length := len(names) - if length < 2 || length > 3 { - return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID) - } - volName := names[length-1] - if !strings.HasPrefix(volName, "vol-") { - return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath) - } - if length == 2 { - sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label - } - if length == 3 { - sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label - } - klog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) + volumeID, err = formatVolumeID(volumeID) + if err != nil { + return nil, fmt.Errorf("failed to get AWS volume id from mount path %q: %v", mountPath, err) } - awsVolume := &v1.Volume{ - Name: volName, - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ - VolumeID: sourceName, - }, - }, - } - return volume.NewSpecFromVolume(awsVolume), nil + file := v1.PersistentVolumeFilesystem + return newAWSVolumeSpec(volName, volumeID, file), nil } func (plugin *awsElasticBlockStorePlugin) RequiresFSResize() bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go index a991474a91f..86e380288e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go @@ -24,15 +24,15 @@ import ( "strconv" "strings" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" "k8s.io/legacy-cloud-providers/aws" - utilstrings "k8s.io/utils/strings" ) var _ volume.BlockVolumePlugin = &awsElasticBlockStorePlugin{} @@ -51,34 +51,27 @@ func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types. 
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return plugin.getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func (plugin *awsElasticBlockStorePlugin) getVolumeSpecFromGlobalMapPath(volumeName string, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} // plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX - vID := filepath.Base(globalMapPath) - if len(vID) <= 1 { - return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath) - } - if !strings.Contains(vID, "vol-") { - return nil, fmt.Errorf("failed to get volumeID from global path=%s, invalid volumeID format = %s", globalMapPath, vID) + pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName) + if !strings.HasPrefix(globalMapPath, pluginDir) { + return nil, fmt.Errorf("volume symlink %s is not in global plugin directory", globalMapPath) } - block := v1.PersistentVolumeBlock - awsVolume := &v1.PersistentVolume{ - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ - VolumeID: vID, - }, - }, - VolumeMode: &block, - }, + fullVolumeID := strings.TrimPrefix(globalMapPath, pluginDir) // /vol-XXXXXX + fullVolumeID = strings.TrimLeft(fullVolumeID, "/") // vol-XXXXXX + vID, err := formatVolumeID(fullVolumeID) + if err != nil { + return nil, fmt.Errorf("failed to get AWS volume id from map path %q: %v", globalMapPath, err) } - return volume.NewSpecFromPersistentVolume(awsVolume, true), nil + block := v1.PersistentVolumeBlock + return newAWSVolumeSpec(volumeName, vID, block), nil } // NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. 
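
The ConstructBlockVolumeSpec/getVolumeSpecFromGlobalMapPath rewrite above now trims the plugin directory prefix and hands the remainder to formatVolumeID, which this patch adds to aws_util.go further below. As a rough illustration of the normalization it performs, here is a hypothetical table-driven test that could sit alongside aws_util.go in package awsebs; the input/output pairs follow the format table in formatVolumeID's own comment, and the test itself is not part of the patch.

package awsebs

import "testing"

// Hypothetical sketch, not part of the patch: exercises the three on-disk
// volume-ID forms documented in formatVolumeID's comment.
func TestFormatVolumeIDSketch(t *testing.T) {
	cases := map[string]string{
		"vol-1234":               "vol-1234",                 // bare ID passes through unchanged
		"aws/vol-1234":           "aws:///vol-1234",          // mountPath form, empty zone
		"aws/us-east-1/vol-1234": "aws://us-east-1/vol-1234", // mountPath form with zone
	}
	for in, want := range cases {
		got, err := formatVolumeID(in)
		if err != nil {
			t.Fatalf("formatVolumeID(%q): unexpected error: %v", in, err)
		}
		if got != want {
			t.Errorf("formatVolumeID(%q) = %q, want %q", in, got, want)
		}
	}
}
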
@@ -133,10 +126,6 @@ func (plugin *awsElasticBlockStorePlugin) newUnmapperInternal(volName string, po }}, nil } -func (c *awsElasticBlockStoreUnmapper) TearDownDevice(mapPath, devicePath string) error { - return nil -} - type awsElasticBlockStoreUnmapper struct { *awsElasticBlockStore } @@ -150,14 +139,6 @@ type awsElasticBlockStoreMapper struct { var _ volume.BlockVolumeMapper = &awsElasticBlockStoreMapper{} -func (b *awsElasticBlockStoreMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *awsElasticBlockStoreMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID // plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go index 2f69d54f9b9..be53866ee06 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go @@ -27,13 +27,14 @@ import ( "time" "k8s.io/klog" + "k8s.io/utils/mount" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/aws" @@ -287,3 +288,50 @@ func findNvmeVolume(findName string) (device string, err error) { return resolved, nil } + +func formatVolumeID(volumeID string) (string, error) { + // This is a workaround to fix the issue in converting aws volume id from globalPDPath and globalMapPath + // There are three formats for AWSEBSVolumeSource.VolumeID and they are stored on disk in paths like so: + // VolumeID mountPath mapPath + // aws:///vol-1234 aws/vol-1234 aws:/vol-1234 + // aws://us-east-1/vol-1234 aws/us-east-1/vol-1234 aws:/us-east-1/vol-1234 + // vol-1234 vol-1234 vol-1234 + // This code is for converting volume ids from paths back to AWS style VolumeIDs + sourceName := volumeID + if strings.HasPrefix(volumeID, "aws/") || strings.HasPrefix(volumeID, "aws:/") { + names := strings.Split(volumeID, "/") + length := len(names) + if length < 2 || length > 3 { + return "", fmt.Errorf("invalid volume name format %q", volumeID) + } + volName := names[length-1] + if !strings.HasPrefix(volName, "vol-") { + return "", fmt.Errorf("Invalid volume name format for AWS volume (%q)", volName) + } + if length == 2 { + sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label + } + if length == 3 { + sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label + } + klog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) + } + return sourceName, nil +} + +func newAWSVolumeSpec(volumeName, volumeID string, mode v1.PersistentVolumeMode) *volume.Spec { + awsVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: volumeID, + }, + }, + VolumeMode: &mode, + }, + 
} + return volume.NewSpecFromPersistentVolume(awsVolume, false) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD index e9345051380..9e607d94ef0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD @@ -25,7 +25,6 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -42,8 +41,47 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( @@ -70,7 +108,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -79,5 +116,6 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go index 28592bc7e94..42296172f0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go @@ -29,12 +29,12 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "k8s.io/klog" + "k8s.io/utils/mount" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/azure" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go index 89a08750ca6..f37e8686a15 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go @@ -26,7 +26,7 @@ import ( libstrings "strings" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + utilexec "k8s.io/utils/exec" ) // exclude those used by azure as resource and OS root in /dev/disk/azure @@ -69,7 +69,7 @@ func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, er return "", fmt.Errorf("read %s error: %v", devLinkPath, err) } -func scsiHostRescan(io ioHandler, exec mount.Exec) { +func scsiHostRescan(io ioHandler, exec utilexec.Interface) { scsi_path := "/sys/class/scsi_host/" if dirs, err := io.ReadDir(scsi_path); err == nil { for _, f := range dirs { @@ -84,7 +84,7 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) { } } -func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) { +func findDiskByLun(lun int, io ioHandler, exec utilexec.Interface) (string, error) { azureDisks := listAzureDiskPath(io) return findDiskByLunWithConstraint(lun, io, azureDisks) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go index 842f023332f..550506d6df5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go @@ -19,11 +19,11 @@ limitations under the License. 
package azure_dd -import "k8s.io/kubernetes/pkg/util/mount" +import "k8s.io/utils/exec" -func scsiHostRescan(io ioHandler, exec mount.Exec) { +func scsiHostRescan(io ioHandler, exec exec.Interface) { } -func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) { +func findDiskByLun(lun int, io ioHandler, exec exec.Interface) (string, error) { return "", nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go index 85bff9ccacd..0b8f9004d68 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go @@ -26,22 +26,22 @@ import ( "strings" "k8s.io/klog" - - "k8s.io/kubernetes/pkg/util/mount" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" ) -func scsiHostRescan(io ioHandler, exec mount.Exec) { +func scsiHostRescan(io ioHandler, exec utilexec.Interface) { cmd := "Update-HostStorageCache" - output, err := exec.Run("powershell", "/c", cmd) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() if err != nil { klog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output)) } } // search Windows disk number by LUN -func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error) { +func findDiskByLun(lun int, iohandler ioHandler, exec utilexec.Interface) (string, error) { cmd := `Get-Disk | select number, location | ConvertTo-Json` - output, err := exec.Run("powershell", "/c", cmd) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() if err != nil { klog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output)) return "", err @@ -98,7 +98,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error return "", nil } -func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) { +func formatIfNotFormatted(disk string, fstype string, exec utilexec.Interface) { if err := mount.ValidateDiskNumber(disk); err != nil { klog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err) return @@ -110,7 +110,7 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) { } cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk) cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype) - output, err := exec.Run("powershell", "/c", cmd) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() if err != nil { klog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output)) } else { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go index 14d997808c4..02de3a1473e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go @@ -31,8 +31,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" 
"k8s.io/legacy-cloud-providers/azure" @@ -122,11 +120,6 @@ func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.AzureDisk != nil) } -func (plugin *azureDataDiskPlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && - utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) -} - func (plugin *azureDataDiskPlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go index 39bdb28edb9..d610afd06d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go @@ -22,15 +22,15 @@ import ( "fmt" "path/filepath" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) var _ volume.VolumePlugin = &azureDataDiskPlugin{} @@ -119,10 +119,6 @@ func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID ty return &azureDataDiskUnmapper{dataDisk: disk}, nil } -func (c *azureDataDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { - return nil -} - type azureDataDiskUnmapper struct { *dataDisk } @@ -136,14 +132,6 @@ type azureDataDiskMapper struct { var _ volume.BlockVolumeMapper = &azureDataDiskMapper{} -func (b *azureDataDiskMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *azureDataDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID // plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go index f2ed18c5d92..2f8d38bd0aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go @@ -23,9 +23,10 @@ import ( "os" "runtime" - "k8s.io/api/core/v1" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index 1f6efb64a1e..6936e40428d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -131,8 +131,9 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie availabilityZones sets.String selectedAvailabilityZone string - diskIopsReadWrite string - diskMbpsReadWrite string + 
diskIopsReadWrite string + diskMbpsReadWrite string + diskEncryptionSetID string ) // maxLength = 79 - (4 for ".vhd") = 75 name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) @@ -175,6 +176,8 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie diskIopsReadWrite = v case "diskmbpsreadwrite": diskMbpsReadWrite = v + case "diskencryptionsetid": + diskEncryptionSetID = v default: return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } @@ -244,15 +247,16 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie } volumeOptions := &azure.ManagedDiskOptions{ - DiskName: name, - StorageAccountType: skuName, - ResourceGroup: resourceGroup, - PVCName: p.options.PVC.Name, - SizeGB: requestGiB, - Tags: tags, - AvailabilityZone: selectedAvailabilityZone, - DiskIOPSReadWrite: diskIopsReadWrite, - DiskMBpsReadWrite: diskMbpsReadWrite, + DiskName: name, + StorageAccountType: skuName, + ResourceGroup: resourceGroup, + PVCName: p.options.PVC.Name, + SizeGB: requestGiB, + Tags: tags, + AvailabilityZone: selectedAvailabilityZone, + DiskIOPSReadWrite: diskIopsReadWrite, + DiskMBpsReadWrite: diskMbpsReadWrite, + DiskEncryptionSetID: diskEncryptionSetID, } diskURI, err = diskController.CreateManagedDisk(volumeOptions) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD index aa27444aeef..1c4640f78a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -15,9 +9,8 @@ go_library( "doc.go", ], importpath = "k8s.io/kubernetes/pkg/volume/azure_file", + visibility = ["//visibility:public"], deps = [ - "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -25,12 +18,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -40,7 +33,6 @@ go_test( srcs = ["azure_file_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -50,6 +42,7 @@ go_test( "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library", 
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -64,4 +57,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go index d6c25bc0c79..cd1a13ba369 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go @@ -24,20 +24,19 @@ import ( "os" "runtime" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/azure" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary endpoint for volume plugins @@ -85,11 +84,6 @@ func (plugin *azureFilePlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.AzureFile != nil) } -func (plugin *azureFilePlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && - utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFile) -} - func (plugin *azureFilePlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD index 993481194dc..b521e4b48c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD @@ -14,13 +14,13 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/cephfs", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -30,12 +30,12 @@ go_test( srcs = ["cephfs_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go index 590e7e4b7ca..4ba29214fd9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go @@ -24,14 +24,15 @@ import ( "runtime" "strings" + 
"k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -71,10 +72,6 @@ func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool { return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil) } -func (plugin *cephfsPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *cephfsPlugin) RequiresRemount() bool { return false } @@ -331,7 +328,7 @@ func (cephfsVolume *cephfsMounter) checkFuseMount() bool { execute := cephfsVolume.plugin.host.GetExec(cephfsVolume.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil { + if _, err := execute.Command("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph").CombinedOutput(); err == nil { klog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.") return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD index a51e18c3d9f..6d8674c91d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/cinder", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -36,6 +35,7 @@ go_library( "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -49,7 +49,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -61,6 +60,7 @@ go_test( "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/openstack:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go index d7ddb29dc1e..ebf84d31d0f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go @@ -26,11 +26,12 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index 040ea619789..d09baeb0caa 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -25,20 +25,21 @@ import ( "path" "path/filepath" + "k8s.io/klog" + "k8s.io/utils/keymutex" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/openstack" - "k8s.io/utils/keymutex" - utilstrings "k8s.io/utils/strings" ) const ( @@ -111,11 +112,6 @@ func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool { return (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil) } -func (plugin *cinderPlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && - utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) -} - func (plugin *cinderPlugin) RequiresRemount() bool { return false } @@ -524,7 +520,7 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { defer c.plugin.volumeLocks.UnlockKey(c.pdName) // Reload list of references, there might be SetUpAt finished in the meantime - refs, err = c.mounter.GetMountRefs(dir) + _, err = c.mounter.GetMountRefs(dir) if err != nil { klog.V(4).Infof("GetMountRefs failed: %v", err) return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go index c03310aa7a4..5c4273b95b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go @@ -22,14 +22,15 @@ import ( "fmt" "path/filepath" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) var _ volume.VolumePlugin = &cinderPlugin{} @@ -53,10 +54,10 @@ func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeNam return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -67,6 +68,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock cinderVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: 
v1.PersistentVolumeSource{ Cinder: &v1.CinderPersistentVolumeSource{ @@ -125,10 +129,6 @@ func (plugin *cinderPlugin) newUnmapperInternal(volName string, podUID types.UID }}, nil } -func (c *cinderPluginUnmapper) TearDownDevice(mapPath, devicePath string) error { - return nil -} - type cinderPluginUnmapper struct { *cinderVolume } @@ -142,14 +142,6 @@ type cinderVolumeMapper struct { var _ volume.BlockVolumeMapper = &cinderVolumeMapper{} -func (b *cinderVolumeMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *cinderVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID // plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD index a8329fa8b07..ab7d2571142 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD @@ -14,7 +14,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/configmap", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -22,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS index 93c0d5e773d..8ef14a0c2a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS @@ -4,6 +4,7 @@ approvers: - pmorie - saad-ali - thockin +emeritus_approvers: - matchstick reviewers: - ivan4th diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index cdaa54ac014..7b0d900f16e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -19,15 +19,16 @@ package configmap import ( "fmt" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. 
@@ -77,10 +78,6 @@ func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.ConfigMap != nil } -func (plugin *configMapPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *configMapPlugin) RequiresRemount() bool { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD index 097c2ad94ed..1b4bf551c8f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD @@ -18,7 +18,6 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/csi/csiv0:go_default_library", "//pkg/volume/csi/nodeinfomanager:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -97,7 +96,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/volume/csi/csiv0:all-srcs", "//pkg/volume/csi/fake:all-srcs", "//pkg/volume/csi/nodeinfomanager:all-srcs", "//pkg/volume/csi/testing:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS index 668ad7b2b14..a8fbd458366 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS @@ -4,3 +4,4 @@ approvers: - jsafrane - saad-ali - vladimirvivien +- davidz627 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go index 283dd09f2e5..1ca94ddfdfa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -28,7 +28,7 @@ import ( "k8s.io/klog" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,6 +51,8 @@ type csiAttacher struct { csiClient csiClient } +type verifyAttachDetachStatus func(attach *storage.VolumeAttachment, volumeHandle string) (bool, error) + // volume.Attacher methods var _ volume.Attacher = &csiAttacher{} @@ -148,79 +150,18 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim } func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) { + klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err) } - successful, err := verifyAttachmentStatus(attach, volumeHandle) + err = c.waitForVolumeAttachDetachStatus(attach, volumeHandle, attachID, timer, timeout, verifyAttachmentStatus) if err != nil { return "", err } - if successful { - return attachID, nil - } - - watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) - if err != nil { - return "", fmt.Errorf("watch error:%v for volume %v", err, volumeHandle) - } - - ch := 
watcher.ResultChan() - defer watcher.Stop() - - for { - select { - case event, ok := <-ch: - if !ok { - klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) - return "", errors.New("volume attachment watch channel had been closed") - } - - switch event.Type { - case watch.Added, watch.Modified: - attach, _ := event.Object.(*storage.VolumeAttachment) - successful, err := verifyAttachmentStatus(attach, volumeHandle) - if err != nil { - return "", err - } - if successful { - return attachID, nil - } - case watch.Deleted: - // if deleted, fail fast - klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID)) - return "", errors.New("volume attachment has been deleted") - - case watch.Error: - klog.Warningf("waitForVolumeAttachmentInternal received watch error: %v", event) - } - - case <-timer.C: - klog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) - return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle) - } - } -} - -func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { - // if being deleted, fail fast - if attachment.GetDeletionTimestamp() != nil { - klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name)) - return false, errors.New("volume attachment is being deleted") - } - // attachment OK - if attachment.Status.Attached { - return true, nil - } - // driver reports attach error - attachErr := attachment.Status.AttachError - if attachErr != nil { - klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) - return false, errors.New(attachErr.Message) - } - return false, nil + return attach.Name, nil } func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { @@ -318,8 +259,8 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo } if err = saveVolumeData(dataDir, volDataFileName, data); err != nil { klog.Error(log("failed to save volume info data: %v", err)) - if cleanerr := os.RemoveAll(dataDir); cleanerr != nil { - klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr)) + if cleanErr := os.RemoveAll(dataDir); cleanErr != nil { + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanErr)) } return err } @@ -445,20 +386,21 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { } klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID)) - return c.waitForVolumeDetachment(volID, attachID) + err := c.waitForVolumeDetachment(volID, attachID, csiTimeout) + return err } -func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error { +func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string, timeout time.Duration) error { klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) - timeout := c.waitSleepTime * 10 timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable defer timer.Stop() return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout) } -func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error { +func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, + timeout 
time.Duration) error { klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { @@ -469,25 +411,30 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str } return errors.New(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) } - // driver reports attach error - detachErr := attach.Status.DetachError - if detachErr != nil { - klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) - return errors.New(detachErr.Message) + err = c.waitForVolumeAttachDetachStatus(attach, volumeHandle, attachID, timer, timeout, verifyDetachmentStatus) + if err != nil { + return err + } + return err +} + +func (c *csiAttacher) waitForVolumeAttachDetachStatus(attach *storage.VolumeAttachment, volumeHandle, attachID string, + timer *time.Timer, timeout time.Duration, verifyStatus verifyAttachDetachStatus) error { + successful, err := verifyStatus(attach, volumeHandle) + if err != nil { + return err + } + if successful { + return nil } watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) if err != nil { - return errors.New(log("watch error:%v for volume %v", err, volumeHandle)) + return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle) } - var watcherClosed bool - ch := watcher.ResultChan() - defer func() { - if !watcherClosed { - watcher.Stop() - } - }() + ch := watcher.ResultChan() + defer watcher.Stop() for { select { case event, ok := <-ch: @@ -499,27 +446,30 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str switch event.Type { case watch.Added, watch.Modified: attach, _ := event.Object.(*storage.VolumeAttachment) - // driver reports attach error - detachErr := attach.Status.DetachError - if detachErr != nil { - klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) - return errors.New(detachErr.Message) + successful, err := verifyStatus(attach, volumeHandle) + if err != nil { + return err + } + if successful { + return nil } case watch.Deleted: - //object deleted + // set attach nil to get different results + // for detachment, a deleted event means successful detachment, should return success + // for attachment, should return fail + if successful, err := verifyStatus(nil, volumeHandle); !successful { + return err + } klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle)) return nil case watch.Error: - watcher.Stop() - watcherClosed = true - // start another cycle - return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout) + klog.Warningf("waitForVolumeAttachDetachInternal received watch error: %v", event) } case <-timer.C: - klog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) - return fmt.Errorf("detachment timeout for volume %v", volumeHandle) + klog.Error(log("attachdetacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + return fmt.Errorf("attachdetachment timeout for volume %v", volumeHandle) } } } @@ -646,3 +596,42 @@ func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMoun return csiSource.Driver, 
csiSource.VolumeHandle, nil } + +func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { + // when we received a deleted event during attachment, fail fast + if attachment == nil { + klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", volumeHandle)) + return false, errors.New("volume attachment has been deleted") + } + // if being deleted, fail fast + if attachment.GetDeletionTimestamp() != nil { + klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name)) + return false, errors.New("volume attachment is being deleted") + } + // attachment OK + if attachment.Status.Attached { + return true, nil + } + // driver reports attach error + attachErr := attachment.Status.AttachError + if attachErr != nil { + klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) + return false, errors.New(attachErr.Message) + } + return false, nil +} + +func verifyDetachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { + // when we received a deleted event during detachment + // it means we have successfully detached it. + if attachment == nil { + return true, nil + } + // driver reports detach error + detachErr := attachment.Status.DetachError + if detachErr != nil { + klog.Error(log("detachment for VolumeAttachment for volume [%s] failed: %v", volumeHandle, detachErr.Message)) + return false, errors.New(detachErr.Message) + } + return false, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go index 5e6dc145caa..8b46d3c3e72 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go @@ -14,6 +14,55 @@ See the License for the specific language governing permissions and limitations under the License. */ +/* +This file defines block volume related methods for CSI driver. +CSI driver is responsible for staging/publishing volumes to their staging/publish paths. +Mapping and unmapping of a device in a publish path to its global map path and its +pod device map path are done by operation_executor through MapBlockVolume/UnmapBlockVolume +(MapBlockVolume and UnmapBlockVolume take care for lock, symlink, and bind mount). + +Summary of block volume related CSI driver's methods are as follows: + - GetGlobalMapPath returns a global map path, + - GetPodDeviceMapPath returns a pod device map path and filename, + - SetUpDevice calls CSI's NodeStageVolume and stage a volume to its staging path, + - MapPodDevice calls CSI's NodePublishVolume and publish a volume to its publish path, + - UnmapPodDevice calls CSI's NodeUnpublishVolume and unpublish a volume from its publish path, + - TearDownDevice calls CSI's NodeUnstageVolume and unstage a volume from its staging path. 
+ +These methods are called by below sequences: + - operation_executor.MountVolume + - csi.GetGlobalMapPath + - csi.SetupDevice + - NodeStageVolume + - ASW.MarkDeviceAsMounted + - csi.GetPodDeviceMapPath + - csi.MapPodDevice + - NodePublishVolume + - util.MapBlockVolume + - ASW.MarkVolumeAsMounted + + - operation_executor.UnmountVolume + - csi.GetPodDeviceMapPath + - util.UnmapBlockVolume + - csi.UnmapPodDevice + - NodeUnpublishVolume + - ASW.MarkVolumeAsUnmounted + + - operation_executor.UnmountDevice + - csi.TearDownDevice + - NodeUnstageVolume + - ASW.MarkDeviceAsUnmounted + +After successful MountVolume for block volume, directory structure will be like below: + /dev/loopX ... Descriptor lock(Loopback device to mapFile under global map path) + /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/{specName}/dev/ ... Global map path + /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/{specName}/dev/{podUID} ... MapFile(Bind mount to publish Path) + /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/staging/{specName} ... Staging path + /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish/{specName}/{podUID} ... Publish path + /var/lib/kubelet/pods/{podUID}/volumeDevices/kubernetes.io~csi/ ... Pod device map path + /var/lib/kubelet/pods/{podUID}/volumeDevices/kubernetes.io~csi/{specName} ... MapFile(Symlink to publish path) +*/ + package csi import ( @@ -31,7 +80,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/volume" - ioutil "k8s.io/kubernetes/pkg/volume/util" utilstrings "k8s.io/utils/strings" ) @@ -49,36 +97,34 @@ type csiBlockMapper struct { } var _ volume.BlockVolumeMapper = &csiBlockMapper{} +var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{} // GetGlobalMapPath returns a global map path (on the node) to a device file which will be symlinked to -// Example: plugins/kubernetes.io/csi/volumeDevices/{pvname}/dev +// Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) { - dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host) + dir := getVolumeDevicePluginDir(m.specName, m.plugin.host) klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir)) return dir, nil } // getStagingPath returns a staging path for a directory (on the node) that should be used on NodeStageVolume/NodeUnstageVolume -// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{pvname} +// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{specName} func (m *csiBlockMapper) getStagingPath() string { - sanitizedSpecVolID := utilstrings.EscapeQualifiedName(m.specName) - return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "staging", sanitizedSpecVolID) + return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "staging", m.specName) } // getPublishPath returns a publish path for a file (on the node) that should be used on NodePublishVolume/NodeUnpublishVolume -// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{pvname} +// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{specName}/{podUID} func (m *csiBlockMapper) getPublishPath() string { - sanitizedSpecVolID := utilstrings.EscapeQualifiedName(m.specName) - return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "publish", sanitizedSpecVolID) + return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "publish", m.specName, string(m.podUID)) } // GetPodDeviceMapPath returns pod's 
device file which will be mapped to a volume -// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi, {pvname} +// returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName} func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) { path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName)) - specName := m.specName - klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName)) - return path, specName + klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName)) + return path, m.specName } // stageVolumeForBlock stages a block volume to stagingPath @@ -150,7 +196,6 @@ func (m *csiBlockMapper) publishVolumeForBlock( accessMode v1.PersistentVolumeAccessMode, csiSource *v1.CSIPersistentVolumeSource, attachment *storage.VolumeAttachment, - stagingPath string, ) (string, error) { klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called")) @@ -186,7 +231,7 @@ func (m *csiBlockMapper) publishVolumeForBlock( ctx, m.volumeID, m.readOnly, - stagingPath, + m.getStagingPath(), publishPath, accessMode, publishVolumeInfo, @@ -204,26 +249,26 @@ func (m *csiBlockMapper) publishVolumeForBlock( } // SetUpDevice ensures the device is attached returns path where the device is located. -func (m *csiBlockMapper) SetUpDevice() (string, error) { +func (m *csiBlockMapper) SetUpDevice() error { if !m.plugin.blockEnabled { - return "", errors.New("CSIBlockVolume feature not enabled") + return errors.New("CSIBlockVolume feature not enabled") } klog.V(4).Infof(log("blockMapper.SetUpDevice called")) // Get csiSource from spec if m.spec == nil { - return "", errors.New(log("blockMapper.SetUpDevice spec is nil")) + return errors.New(log("blockMapper.SetUpDevice spec is nil")) } csiSource, err := getCSISourceFromSpec(m.spec) if err != nil { - return "", errors.New(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err)) + return errors.New(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err)) } driverName := csiSource.Driver skip, err := m.plugin.skipAttach(driverName) if err != nil { - return "", errors.New(log("blockMapper.SetupDevice failed to check CSIDriver for %s: %v", driverName, err)) + return errors.New(log("blockMapper.SetupDevice failed to check CSIDriver for %s: %v", driverName, err)) } var attachment *storage.VolumeAttachment @@ -233,7 +278,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - return "", errors.New(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) + return errors.New(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) } } @@ -248,17 +293,67 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { csiClient, err := m.csiClientGetter.Get() if err != nil { - return "", errors.New(log("blockMapper.SetUpDevice failed to get CSI client: %v", err)) + return errors.New(log("blockMapper.SetUpDevice failed to get CSI client: %v", err)) } // Call NodeStageVolume - stagingPath, err := m.stageVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment) + _, err = m.stageVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment) if err != nil { - return "", err + return err + } + + return nil +} + +func (m *csiBlockMapper) MapPodDevice() (string, 
error) { + if !m.plugin.blockEnabled { + return "", errors.New("CSIBlockVolume feature not enabled") + } + klog.V(4).Infof(log("blockMapper.MapPodDevice called")) + + // Get csiSource from spec + if m.spec == nil { + return "", errors.New(log("blockMapper.MapPodDevice spec is nil")) + } + + csiSource, err := getCSISourceFromSpec(m.spec) + if err != nil { + return "", errors.New(log("blockMapper.MapPodDevice failed to get CSI persistent source: %v", err)) + } + + driverName := csiSource.Driver + skip, err := m.plugin.skipAttach(driverName) + if err != nil { + return "", errors.New(log("blockMapper.MapPodDevice failed to check CSIDriver for %s: %v", driverName, err)) + } + + var attachment *storage.VolumeAttachment + if !skip { + // Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName + nodeName := string(m.plugin.host.GetNodeName()) + attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) + attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + if err != nil { + return "", errors.New(log("blockMapper.MapPodDevice failed to get volume attachment [id=%v]: %v", attachID, err)) + } + } + + //TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI + accessMode := v1.ReadWriteOnce + if m.spec.PersistentVolume.Spec.AccessModes != nil { + accessMode = m.spec.PersistentVolume.Spec.AccessModes[0] + } + + ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) + defer cancel() + + csiClient, err := m.csiClientGetter.Get() + if err != nil { + return "", errors.New(log("blockMapper.MapPodDevice failed to get CSI client: %v", err)) } // Call NodePublishVolume - publishPath, err := m.publishVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment, stagingPath) + publishPath, err := m.publishVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment) if err != nil { return "", err } @@ -266,11 +361,8 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { return publishPath, nil } -func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - var _ volume.BlockVolumeUnmapper = &csiBlockMapper{} +var _ volume.CustomBlockVolumeUnmapper = &csiBlockMapper{} // unpublishVolumeForBlock unpublishes a block volume from publishPath func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiClient, publishPath string) error { @@ -321,8 +413,6 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error return errors.New("CSIBlockVolume feature not enabled") } - klog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath)) - ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() @@ -331,31 +421,48 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error return errors.New(log("blockMapper.TearDownDevice failed to get CSI client: %v", err)) } - // Call NodeUnpublishVolume - publishPath := m.getPublishPath() - if _, err := os.Stat(publishPath); err != nil { + // Call NodeUnstageVolume + stagingPath := m.getStagingPath() + if _, err := os.Stat(stagingPath); err != nil { if os.IsNotExist(err) { - klog.V(4).Infof(log("blockMapper.TearDownDevice publishPath(%s) has already been deleted, skip calling NodeUnpublishVolume", publishPath)) + klog.V(4).Infof(log("blockMapper.TearDownDevice 
stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath)) } else { return err } } else { - err := m.unpublishVolumeForBlock(ctx, csiClient, publishPath) + err := m.unstageVolumeForBlock(ctx, csiClient, stagingPath) if err != nil { return err } } - // Call NodeUnstageVolume - stagingPath := m.getStagingPath() - if _, err := os.Stat(stagingPath); err != nil { + return nil +} + +// UnmapPodDevice unmaps the block device path. +func (m *csiBlockMapper) UnmapPodDevice() error { + if !m.plugin.blockEnabled { + return errors.New("CSIBlockVolume feature not enabled") + } + publishPath := m.getPublishPath() + + csiClient, err := m.csiClientGetter.Get() + if err != nil { + return errors.New(log("blockMapper.UnmapPodDevice failed to get CSI client: %v", err)) + } + + ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) + defer cancel() + + // Call NodeUnpublishVolume + if _, err := os.Stat(publishPath); err != nil { if os.IsNotExist(err) { - klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath)) + klog.V(4).Infof(log("blockMapper.UnmapPodDevice publishPath(%s) has already been deleted, skip calling NodeUnpublishVolume", publishPath)) } else { return err } } else { - err := m.unstageVolumeForBlock(ctx, csiClient, stagingPath) + err := m.unpublishVolumeForBlock(ctx, csiClient, publishPath) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go index 73c778456c4..99c3181a0aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go @@ -30,11 +30,9 @@ import ( "google.golang.org/grpc" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" - csipbv0 "k8s.io/kubernetes/pkg/volume/csi/csiv0" ) type csiClient interface { @@ -95,7 +93,6 @@ type csiDriverClient struct { driverName csiDriverName addr csiAddr nodeV1ClientCreator nodeV1ClientCreator - nodeV0ClientCreator nodeV0ClientCreator } var _ csiClient = &csiDriverClient{} @@ -106,12 +103,6 @@ type nodeV1ClientCreator func(addr csiAddr) ( err error, ) -type nodeV0ClientCreator func(addr csiAddr) ( - nodeClient csipbv0.NodeClient, - closer io.Closer, - err error, -) - const ( initialDuration = 1 * time.Second factor = 2.0 @@ -134,22 +125,6 @@ func newV1NodeClient(addr csiAddr) (nodeClient csipbv1.NodeClient, closer io.Clo return nodeClient, conn, nil } -// newV0NodeClient creates a new NodeClient with the internally used gRPC -// connection set up. It also returns a closer which must to be called to close -// the gRPC connection when the NodeClient is not used anymore. -// This is the default implementation for the nodeV1ClientCreator, used in -// newCsiDriverClient. 
-func newV0NodeClient(addr csiAddr) (nodeClient csipbv0.NodeClient, closer io.Closer, err error) { - var conn *grpc.ClientConn - conn, err = newGrpcConn(addr) - if err != nil { - return nil, nil, err - } - - nodeClient = csipbv0.NewNodeClient(conn) - return nodeClient, conn, nil -} - func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) { if driverName == "" { return nil, fmt.Errorf("driver name is empty") @@ -161,19 +136,10 @@ func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) { } nodeV1ClientCreator := newV1NodeClient - nodeV0ClientCreator := newV0NodeClient - - if versionRequiresV0Client(existingDriver.highestSupportedVersion) { - nodeV1ClientCreator = nil - } else { - nodeV0ClientCreator = nil - } - return &csiDriverClient{ driverName: driverName, addr: csiAddr(existingDriver.endpoint), nodeV1ClientCreator: nodeV1ClientCreator, - nodeV0ClientCreator: nodeV0ClientCreator, }, nil } @@ -188,11 +154,7 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) ( backoff := wait.Backoff{Duration: initialDuration, Factor: factor, Steps: steps} err = wait.ExponentialBackoff(backoff, func() (bool, error) { var getNodeInfoError error - if c.nodeV1ClientCreator != nil { - nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx) - } else if c.nodeV0ClientCreator != nil { - nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV0(ctx) - } + nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx) if nodeID != "" { return true, nil } @@ -231,30 +193,6 @@ func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) ( return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil } -func (c *csiDriverClient) nodeGetInfoV0(ctx context.Context) ( - nodeID string, - maxVolumePerNode int64, - accessibleTopology map[string]string, - err error) { - - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) - if err != nil { - return "", 0, nil, err - } - defer closer.Close() - - res, err := nodeClient.NodeGetInfo(ctx, &csipbv0.NodeGetInfoRequest{}) - if err != nil { - return "", 0, nil, err - } - - topology := res.GetAccessibleTopology() - if topology != nil { - accessibleTopology = topology.Segments - } - return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil -} - func (c *csiDriverClient) NodePublishVolume( ctx context.Context, volID string, @@ -275,88 +213,11 @@ func (c *csiDriverClient) NodePublishVolume( if targetPath == "" { return errors.New("missing target path") } - if c.nodeV1ClientCreator != nil { - return c.nodePublishVolumeV1( - ctx, - volID, - readOnly, - stagingTargetPath, - targetPath, - accessMode, - publishContext, - volumeContext, - secrets, - fsType, - mountOptions, - ) - } else if c.nodeV0ClientCreator != nil { - return c.nodePublishVolumeV0( - ctx, - volID, - readOnly, - stagingTargetPath, - targetPath, - accessMode, - publishContext, - volumeContext, - secrets, - fsType, - mountOptions, - ) - } - - return fmt.Errorf("failed to call NodePublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil") - -} - -func (c *csiDriverClient) NodeExpandVolume(ctx context.Context, volumeID, volumePath string, newSize resource.Quantity) (resource.Quantity, error) { if c.nodeV1ClientCreator == nil { - return newSize, fmt.Errorf("version of CSI driver does not support volume expansion") - } + return errors.New("failed to call NodePublishVolume. 
nodeV1ClientCreator is nil") - if volumeID == "" { - return newSize, errors.New("missing volume id") - } - if volumePath == "" { - return newSize, errors.New("missing volume path") - } - - if newSize.Value() < 0 { - return newSize, errors.New("size can not be less than 0") } - nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) - if err != nil { - return newSize, err - } - defer closer.Close() - - req := &csipbv1.NodeExpandVolumeRequest{ - VolumeId: volumeID, - VolumePath: volumePath, - CapacityRange: &csipbv1.CapacityRange{RequiredBytes: newSize.Value()}, - } - resp, err := nodeClient.NodeExpandVolume(ctx, req) - if err != nil { - return newSize, err - } - updatedQuantity := resource.NewQuantity(resp.CapacityBytes, resource.BinarySI) - return *updatedQuantity, nil -} - -func (c *csiDriverClient) nodePublishVolumeV1( - ctx context.Context, - volID string, - readOnly bool, - stagingTargetPath string, - targetPath string, - accessMode api.PersistentVolumeAccessMode, - publishContext map[string]string, - volumeContext map[string]string, - secrets map[string]string, - fsType string, - mountOptions []string, -) error { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err @@ -397,57 +258,39 @@ func (c *csiDriverClient) nodePublishVolumeV1( return err } -func (c *csiDriverClient) nodePublishVolumeV0( - ctx context.Context, - volID string, - readOnly bool, - stagingTargetPath string, - targetPath string, - accessMode api.PersistentVolumeAccessMode, - publishContext map[string]string, - volumeContext map[string]string, - secrets map[string]string, - fsType string, - mountOptions []string, -) error { - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) - if err != nil { - return err +func (c *csiDriverClient) NodeExpandVolume(ctx context.Context, volumeID, volumePath string, newSize resource.Quantity) (resource.Quantity, error) { + if c.nodeV1ClientCreator == nil { + return newSize, fmt.Errorf("version of CSI driver does not support volume expansion") } - defer closer.Close() - req := &csipbv0.NodePublishVolumeRequest{ - VolumeId: volID, - TargetPath: targetPath, - Readonly: readOnly, - PublishInfo: publishContext, - VolumeAttributes: volumeContext, - NodePublishSecrets: secrets, - VolumeCapability: &csipbv0.VolumeCapability{ - AccessMode: &csipbv0.VolumeCapability_AccessMode{ - Mode: asCSIAccessModeV0(accessMode), - }, - }, + if volumeID == "" { + return newSize, errors.New("missing volume id") } - if stagingTargetPath != "" { - req.StagingTargetPath = stagingTargetPath + if volumePath == "" { + return newSize, errors.New("missing volume path") } - if fsType == fsTypeBlockName { - req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{ - Block: &csipbv0.VolumeCapability_BlockVolume{}, - } - } else { - req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{ - Mount: &csipbv0.VolumeCapability_MountVolume{ - FsType: fsType, - MountFlags: mountOptions, - }, - } + if newSize.Value() < 0 { + return newSize, errors.New("size can not be less than 0") } - _, err = nodeClient.NodePublishVolume(ctx, req) - return err + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) + if err != nil { + return newSize, err + } + defer closer.Close() + + req := &csipbv1.NodeExpandVolumeRequest{ + VolumeId: volumeID, + VolumePath: volumePath, + CapacityRange: &csipbv1.CapacityRange{RequiredBytes: newSize.Value()}, + } + resp, err := nodeClient.NodeExpandVolume(ctx, req) + if err != nil { + return newSize, err + } + updatedQuantity := 
resource.NewQuantity(resp.CapacityBytes, resource.BinarySI) + return *updatedQuantity, nil } func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error { @@ -458,17 +301,10 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, if targetPath == "" { return errors.New("missing target path") } - - if c.nodeV1ClientCreator != nil { - return c.nodeUnpublishVolumeV1(ctx, volID, targetPath) - } else if c.nodeV0ClientCreator != nil { - return c.nodeUnpublishVolumeV0(ctx, volID, targetPath) + if c.nodeV1ClientCreator == nil { + return errors.New("nodeV1ClientCreate is nil") } - return fmt.Errorf("failed to call NodeUnpublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil") -} - -func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID string, targetPath string) error { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err @@ -484,22 +320,6 @@ func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID strin return err } -func (c *csiDriverClient) nodeUnpublishVolumeV0(ctx context.Context, volID string, targetPath string) error { - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) - if err != nil { - return err - } - defer closer.Close() - - req := &csipbv0.NodeUnpublishVolumeRequest{ - VolumeId: volID, - TargetPath: targetPath, - } - - _, err = nodeClient.NodeUnpublishVolume(ctx, req) - return err -} - func (c *csiDriverClient) NodeStageVolume(ctx context.Context, volID string, publishContext map[string]string, @@ -517,27 +337,10 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context, if stagingTargetPath == "" { return errors.New("missing staging target path") } - - if c.nodeV1ClientCreator != nil { - return c.nodeStageVolumeV1(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext, mountOptions) - } else if c.nodeV0ClientCreator != nil { - return c.nodeStageVolumeV0(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext, mountOptions) + if c.nodeV1ClientCreator == nil { + return errors.New("nodeV1ClientCreate is nil") } - return fmt.Errorf("failed to call NodeStageVolume. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") -} - -func (c *csiDriverClient) nodeStageVolumeV1( - ctx context.Context, - volID string, - publishContext map[string]string, - stagingTargetPath string, - fsType string, - accessMode api.PersistentVolumeAccessMode, - secrets map[string]string, - volumeContext map[string]string, - mountOptions []string, -) error { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err @@ -574,53 +377,6 @@ func (c *csiDriverClient) nodeStageVolumeV1( return err } -func (c *csiDriverClient) nodeStageVolumeV0( - ctx context.Context, - volID string, - publishContext map[string]string, - stagingTargetPath string, - fsType string, - accessMode api.PersistentVolumeAccessMode, - secrets map[string]string, - volumeContext map[string]string, - mountOptions []string, -) error { - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) - if err != nil { - return err - } - defer closer.Close() - - req := &csipbv0.NodeStageVolumeRequest{ - VolumeId: volID, - PublishInfo: publishContext, - StagingTargetPath: stagingTargetPath, - VolumeCapability: &csipbv0.VolumeCapability{ - AccessMode: &csipbv0.VolumeCapability_AccessMode{ - Mode: asCSIAccessModeV0(accessMode), - }, - }, - NodeStageSecrets: secrets, - VolumeAttributes: volumeContext, - } - - if fsType == fsTypeBlockName { - req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{ - Block: &csipbv0.VolumeCapability_BlockVolume{}, - } - } else { - req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{ - Mount: &csipbv0.VolumeCapability_MountVolume{ - FsType: fsType, - MountFlags: mountOptions, - }, - } - } - - _, err = nodeClient.NodeStageVolume(ctx, req) - return err -} - func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error { klog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath)) if volID == "" { @@ -629,17 +385,10 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT if stagingTargetPath == "" { return errors.New("missing staging target path") } - - if c.nodeV1ClientCreator != nil { - return c.nodeUnstageVolumeV1(ctx, volID, stagingTargetPath) - } else if c.nodeV0ClientCreator != nil { - return c.nodeUnstageVolumeV0(ctx, volID, stagingTargetPath) + if c.nodeV1ClientCreator == nil { + return errors.New("nodeV1ClientCreate is nil") } - return fmt.Errorf("failed to call NodeUnstageVolume. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") -} - -func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagingTargetPath string) error { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err @@ -654,68 +403,12 @@ func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagin return err } -func (c *csiDriverClient) nodeUnstageVolumeV0(ctx context.Context, volID, stagingTargetPath string) error { - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) - if err != nil { - return err - } - defer closer.Close() - - req := &csipbv0.NodeUnstageVolumeRequest{ - VolumeId: volID, - StagingTargetPath: stagingTargetPath, - } - _, err = nodeClient.NodeUnstageVolume(ctx, req) - return err -} - func (c *csiDriverClient) NodeSupportsNodeExpand(ctx context.Context) (bool, error) { klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if Node has EXPAND_VOLUME capability")) - - if c.nodeV1ClientCreator != nil { - nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) - if err != nil { - return false, err - } - defer closer.Close() - - req := &csipbv1.NodeGetCapabilitiesRequest{} - resp, err := nodeClient.NodeGetCapabilities(ctx, req) - if err != nil { - return false, err - } - - capabilities := resp.GetCapabilities() - - if capabilities == nil { - return false, nil - } - for _, capability := range capabilities { - if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME { - return true, nil - } - } - return false, nil - } else if c.nodeV0ClientCreator != nil { - return false, nil - } - return false, fmt.Errorf("failed to call NodeSupportsNodeExpand. Both nodeV1ClientCreator and nodeV0ClientCreator are nil") - -} - -func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) { - klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage")) - - if c.nodeV1ClientCreator != nil { - return c.nodeSupportsStageUnstageV1(ctx) - } else if c.nodeV0ClientCreator != nil { - return c.nodeSupportsStageUnstageV0(ctx) + if c.nodeV1ClientCreator == nil { + return false, errors.New("nodeV1ClientCreate is nil") } - return false, fmt.Errorf("failed to call NodeSupportsStageUnstage. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") -} - -func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, error) { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return false, err @@ -730,26 +423,30 @@ func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, capabilities := resp.GetCapabilities() - stageUnstageSet := false if capabilities == nil { return false, nil } for _, capability := range capabilities { - if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { - stageUnstageSet = true + if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME { + return true, nil } } - return stageUnstageSet, nil + return false, nil } -func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, error) { - nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) +func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) { + klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage")) + if c.nodeV1ClientCreator == nil { + return false, errors.New("nodeV1ClientCreate is nil") + } + + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return false, err } defer closer.Close() - req := &csipbv0.NodeGetCapabilitiesRequest{} + req := &csipbv1.NodeGetCapabilitiesRequest{} resp, err := nodeClient.NodeGetCapabilities(ctx, req) if err != nil { return false, err @@ -762,8 +459,9 @@ func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, return false, nil } for _, capability := range capabilities { - if capability.GetRpc().GetType() == csipbv0.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { + if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { stageUnstageSet = true + break } } return stageUnstageSet, nil @@ -781,18 +479,6 @@ func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapabili return csipbv1.VolumeCapability_AccessMode_UNKNOWN } -func asCSIAccessModeV0(am api.PersistentVolumeAccessMode) csipbv0.VolumeCapability_AccessMode_Mode { - switch am { - case api.ReadWriteOnce: - return csipbv0.VolumeCapability_AccessMode_SINGLE_NODE_WRITER - case api.ReadOnlyMany: - return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY - case api.ReadWriteMany: - return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER - } - return csipbv0.VolumeCapability_AccessMode_UNKNOWN -} - func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) { network := "unix" klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr)) @@ -806,14 +492,6 @@ func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) { ) } -func versionRequiresV0Client(version *utilversion.Version) bool { - if version != nil && version.Major() == 0 { - return true - } - - return false -} - // CSI client getter with cache. // This provides a method to initialize CSI client with driver name and caches // it for later use. When CSI clients have not been discovered yet (e.g. 
@@ -849,13 +527,10 @@ func (c *csiClientGetter) Get() (csiClient, error) { func (c *csiDriverClient) NodeSupportsVolumeStats(ctx context.Context) (bool, error) { klog.V(5).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsVolumeStats")) - if c.nodeV1ClientCreator != nil { - return c.nodeSupportsVolumeStatsV1(ctx) + if c.nodeV1ClientCreator == nil { + return false, errors.New("nodeV1ClientCreate is nil") } - return false, fmt.Errorf("failed to call NodeSupportsVolumeStats. nodeV1ClientCreator is nil") -} -func (c *csiDriverClient) nodeSupportsVolumeStatsV1(ctx context.Context) (bool, error) { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return false, err @@ -886,19 +561,10 @@ func (c *csiDriverClient) NodeGetVolumeStats(ctx context.Context, volID string, if targetPath == "" { return nil, errors.New("missing target path") } - - if c.nodeV1ClientCreator != nil { - return c.nodeGetVolumeStatsV1(ctx, volID, targetPath) + if c.nodeV1ClientCreator == nil { + return nil, errors.New("nodeV1ClientCreate is nil") } - return nil, fmt.Errorf("failed to call NodeGetVolumeStats. nodeV1ClientCreator is nil") -} - -func (c *csiDriverClient) nodeGetVolumeStatsV1( - ctx context.Context, - volID string, - targetPath string, -) (*volume.Metrics, error) { nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go index b14476a4c1f..2cce6de579c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -67,6 +67,7 @@ type csiMountMgr struct { volumeID string specVolumeID string readOnly bool + supportsSELinux bool spec *volume.Spec pod *api.Pod podUID types.UID @@ -259,6 +260,11 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error return errors.New(log("mounter.SetupAt failed: %v", err)) } + c.supportsSELinux, err = c.kubeVolHost.GetHostUtil().GetSELinuxSupport(dir) + if err != nil { + klog.V(2).Info(log("error checking for SELinux support: %s", err)) + } + // apply volume ownership // The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323 // if fstype is "", then skip fsgroup (could be indication of non-block filesystem) @@ -328,18 +334,10 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { } func (c *csiMountMgr) GetAttributes() volume.Attributes { - path := c.GetPath() - hu := c.kubeVolHost.GetHostUtil() - supportSelinux, err := hu.GetSELinuxSupport(path) - if err != nil { - klog.V(2).Info(log("error checking for SELinux support: %s", err)) - // Best guess - supportSelinux = false - } return volume.Attributes{ ReadOnly: c.readOnly, Managed: !c.readOnly, - SupportsSELinux: supportSelinux, + SupportsSELinux: c.supportsSELinux, } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go index f90878302ba..d0e8f831bd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -21,7 +21,6 @@ import ( "fmt" "os" "path" - "sort" "strings" "time" @@ -60,8 +59,6 @@ const ( CsiResyncPeriod = time.Minute ) -var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", 
"0.4.0"} - type csiPlugin struct { host volume.VolumeHost blockEnabled bool @@ -97,21 +94,15 @@ var PluginHandler = &RegistrationHandler{} // ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. -func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { - klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s, foundInDeprecatedDir: %v", - pluginName, endpoint, strings.Join(versions, ","), foundInDeprecatedDir)) - - if foundInDeprecatedDir { - // CSI 0.x drivers used /var/lib/kubelet/plugins as the socket dir. - // This was deprecated as the socket dir for kubelet drivers, in lieu of a dedicated dir /var/lib/kubelet/plugins_registry - // The deprecated dir will only be allowed for a whitelisted set of old versions. - // CSI 1.x drivers should use the /var/lib/kubelet/plugins_registry - if !isDeprecatedSocketDirAllowed(versions) { - return errors.New(log("socket for CSI driver %q versions %v was found in a deprecated dir. Drivers implementing CSI 1.x+ must use the new dir", pluginName, versions)) - } - } +func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { + klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s", + pluginName, endpoint, strings.Join(versions, ","))) _, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions) + if err != nil { + return fmt.Errorf("validation failed for CSI Driver %s at endpoint %s: %v", pluginName, endpoint, err) + } + return err } @@ -323,10 +314,6 @@ func (p *csiPlugin) CanSupport(spec *volume.Spec) bool { return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil } -func (p *csiPlugin) IsMigratedToCSI() bool { - return false -} - func (p *csiPlugin) RequiresRemount() bool { return false } @@ -745,7 +732,9 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) { kletHost, ok := p.host.(volume.KubeletVolumeHost) if ok { - kletHost.WaitForCacheSync() + if err := kletHost.WaitForCacheSync(); err != nil { + return false, err + } } if p.csiDriverLister == nil { @@ -784,7 +773,9 @@ func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storag if p.csiDriverLister != nil { kletHost, ok := p.host.(volume.KubeletVolumeHost) if ok { - kletHost.WaitForCacheSync() + if err := kletHost.WaitForCacheSync(); err != nil { + return err + } } c, err := p.csiDriverLister.Get(driver) @@ -892,46 +883,36 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) { return nil, errors.New(log("CSI driver reporting empty array for supported versions")) } - // Sort by lowest to highest version - sort.Slice(versions, func(i, j int) bool { - parsedVersionI, err := utilversion.ParseGeneric(versions[i]) - if err != nil { - // Push bad values to the bottom - return true - } - - parsedVersionJ, err := utilversion.ParseGeneric(versions[j]) - if err != nil { - // Push bad values to the bottom - return false - } - - return parsedVersionI.LessThan(parsedVersionJ) - }) - + var highestSupportedVersion *utilversion.Version + var theErr error for i := len(versions) - 1; i >= 0; i-- { - highestSupportedVersion, err := utilversion.ParseGeneric(versions[i]) + currentHighestVer, err := utilversion.ParseGeneric(versions[i]) if err != nil { - return nil, err + theErr = err + continue } - - if highestSupportedVersion.Major() <= 1 
{ - return highestSupportedVersion, nil + if currentHighestVer.Major() > 1 { + // CSI currently only has version 0.x and 1.x (see https://github.com/container-storage-interface/spec/releases). + // Therefore any driver claiming version 2.x+ is ignored as an unsupported versions. + // Future 1.x versions of CSI are supposed to be backwards compatible so this version of Kubernetes will work with any 1.x driver + // (or 0.x), but it may not work with 2.x drivers (because 2.x does not have to be backwards compatible with 1.x). + continue + } + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer } } - return nil, errors.New(log("None of the CSI versions reported by this driver are supported")) -} - -// Only drivers that implement CSI 0.x are allowed to use deprecated socket dir. -func isDeprecatedSocketDirAllowed(versions []string) bool { - for _, version := range versions { - if isV0Version(version) { - return true - } + if highestSupportedVersion == nil { + return nil, fmt.Errorf("could not find a highest supported version from versions (%v) reported by this driver: %v", versions, theErr) } - return false + if highestSupportedVersion.Major() != 1 { + // CSI v0.x is no longer supported as of Kubernetes v1.17 in + // accordance with deprecation policy set out in Kubernetes v1.13 + return nil, fmt.Errorf("highest supported version reported by driver is %v, must be v1.x", highestSupportedVersion) + } + return highestSupportedVersion, nil } func isV0Version(version string) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go deleted file mode 100644 index 174badd75a0..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go +++ /dev/null @@ -1,5007 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// For backwards compatibility with CSI 0.x we carry a copy of the -// CSI 0.3 client. - -package csiv0 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import wrappers "github.com/golang/protobuf/ptypes/wrappers" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type PluginCapability_Service_Type int32 - -const ( - PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 - // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for - // the ControllerService. 
Plugins SHOULD provide this capability. - // In rare cases certain plugins may wish to omit the - // ControllerService entirely from their implementation, but such - // SHOULD NOT be the common case. - // The presence of this capability determines whether the CO will - // attempt to invoke the REQUIRED ControllerService RPCs, as well - // as specific RPCs as indicated by ControllerGetCapabilities. - PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 - // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this - // plugin may not be equally accessible by all nodes in the - // cluster. The CO MUST use the topology information returned by - // CreateVolumeRequest along with the topology information - // returned by NodeGetInfo to ensure that a given volume is - // accessible from a given node when scheduling workloads. - PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 -) - -var PluginCapability_Service_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CONTROLLER_SERVICE", - 2: "ACCESSIBILITY_CONSTRAINTS", -} -var PluginCapability_Service_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CONTROLLER_SERVICE": 1, - "ACCESSIBILITY_CONSTRAINTS": 2, -} - -func (x PluginCapability_Service_Type) String() string { - return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) -} -func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4, 0, 0} -} - -type VolumeCapability_AccessMode_Mode int32 - -const ( - VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 - // Can only be published once as read/write on a single node, at - // any given time. - VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 - // Can only be published once as readonly on a single node, at - // any given time. - VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 - // Can be published as readonly at multiple nodes simultaneously. - VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 - // Can be published at multiple nodes simultaneously. Only one of - // the node can be used as read/write. The rest will be readonly. - VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 - // Can be published as read/write at multiple nodes - // simultaneously. 
- VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 -) - -var VolumeCapability_AccessMode_Mode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SINGLE_NODE_WRITER", - 2: "SINGLE_NODE_READER_ONLY", - 3: "MULTI_NODE_READER_ONLY", - 4: "MULTI_NODE_SINGLE_WRITER", - 5: "MULTI_NODE_MULTI_WRITER", -} -var VolumeCapability_AccessMode_Mode_value = map[string]int32{ - "UNKNOWN": 0, - "SINGLE_NODE_WRITER": 1, - "SINGLE_NODE_READER_ONLY": 2, - "MULTI_NODE_READER_ONLY": 3, - "MULTI_NODE_SINGLE_WRITER": 4, - "MULTI_NODE_MULTI_WRITER": 5, -} - -func (x VolumeCapability_AccessMode_Mode) String() string { - return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) -} -func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 2, 0} -} - -type ControllerServiceCapability_RPC_Type int32 - -const ( - ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 - ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 - ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 - ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 - ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 - // Currently the only way to consume a snapshot is to create - // a volume from it. Therefore plugins supporting - // CREATE_DELETE_SNAPSHOT MUST support creating volume from - // snapshot. - ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. - ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 -) - -var ControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_VOLUME", - 2: "PUBLISH_UNPUBLISH_VOLUME", - 3: "LIST_VOLUMES", - 4: "GET_CAPACITY", - 5: "CREATE_DELETE_SNAPSHOT", - 6: "LIST_SNAPSHOTS", -} -var ControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_VOLUME": 1, - "PUBLISH_UNPUBLISH_VOLUME": 2, - "LIST_VOLUMES": 3, - "GET_CAPACITY": 4, - "CREATE_DELETE_SNAPSHOT": 5, - "LIST_SNAPSHOTS": 6, -} - -func (x ControllerServiceCapability_RPC_Type) String() string { - return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) -} -func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29, 0, 0} -} - -type SnapshotStatus_Type int32 - -const ( - SnapshotStatus_UNKNOWN SnapshotStatus_Type = 0 - // A snapshot is ready for use. - SnapshotStatus_READY SnapshotStatus_Type = 1 - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - SnapshotStatus_UPLOADING SnapshotStatus_Type = 2 - // An error occurred during the snapshot uploading process. 
- // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - SnapshotStatus_ERROR_UPLOADING SnapshotStatus_Type = 3 -) - -var SnapshotStatus_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "UPLOADING", - 3: "ERROR_UPLOADING", -} -var SnapshotStatus_Type_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "UPLOADING": 2, - "ERROR_UPLOADING": 3, -} - -func (x SnapshotStatus_Type) String() string { - return proto.EnumName(SnapshotStatus_Type_name, int32(x)) -} -func (SnapshotStatus_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{33, 0} -} - -type NodeServiceCapability_RPC_Type int32 - -const ( - NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 - NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 -) - -var NodeServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "STAGE_UNSTAGE_VOLUME", -} -var NodeServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "STAGE_UNSTAGE_VOLUME": 1, -} - -func (x NodeServiceCapability_RPC_Type) String() string { - return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) -} -func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{50, 0, 0} -} - -type GetPluginInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } -func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoRequest) ProtoMessage() {} -func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{0} -} -func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) -} -func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) -} -func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) -} -func (m *GetPluginInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoRequest.Size(m) -} -func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo - -type GetPluginInfoResponse struct { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 - // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // This field is REQUIRED. Value of this field is opaque to the CO. 
- VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` - // This field is OPTIONAL. Values are opaque to the CO. - Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } -func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoResponse) ProtoMessage() {} -func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{1} -} -func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) -} -func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) -} -func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) -} -func (m *GetPluginInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoResponse.Size(m) -} -func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo - -func (m *GetPluginInfoResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetPluginInfoResponse) GetVendorVersion() string { - if m != nil { - return m.VendorVersion - } - return "" -} - -func (m *GetPluginInfoResponse) GetManifest() map[string]string { - if m != nil { - return m.Manifest - } - return nil -} - -type GetPluginCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } -func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesRequest) ProtoMessage() {} -func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{2} -} -func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) -} -func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) -} -func (m *GetPluginCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) -} -func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo - -type GetPluginCapabilitiesResponse struct { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. 
- Capabilities []*PluginCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } -func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesResponse) ProtoMessage() {} -func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{3} -} -func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) -} -func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) -} -func (m *GetPluginCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) -} -func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo - -func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the plugin. -type PluginCapability struct { - // Types that are valid to be assigned to Type: - // *PluginCapability_Service_ - Type isPluginCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PluginCapability) Reset() { *m = PluginCapability{} } -func (m *PluginCapability) String() string { return proto.CompactTextString(m) } -func (*PluginCapability) ProtoMessage() {} -func (*PluginCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4} -} -func (m *PluginCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability.Unmarshal(m, b) -} -func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) -} -func (dst *PluginCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability.Merge(dst, src) -} -func (m *PluginCapability) XXX_Size() int { - return xxx_messageInfo_PluginCapability.Size(m) -} -func (m *PluginCapability) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginCapability proto.InternalMessageInfo - -type isPluginCapability_Type interface { - isPluginCapability_Type() -} - -type PluginCapability_Service_ struct { - Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,oneof"` -} - -func (*PluginCapability_Service_) isPluginCapability_Type() {} - -func (m *PluginCapability) GetType() isPluginCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *PluginCapability) GetService() *PluginCapability_Service { - if x, ok := m.GetType().(*PluginCapability_Service_); ok { - return x.Service - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ - (*PluginCapability_Service_)(nil), - } -} - -func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Service); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) - } - return nil -} - -func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*PluginCapability) - switch tag { - case 1: // type.service - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PluginCapability_Service) - err := b.DecodeMessage(msg) - m.Type = &PluginCapability_Service_{msg} - return true, err - default: - return false, nil - } -} - -func _PluginCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - s := proto.Size(x.Service) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PluginCapability_Service struct { - Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.PluginCapability_Service_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } -func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } -func (*PluginCapability_Service) ProtoMessage() {} -func (*PluginCapability_Service) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4, 0} -} -func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) -} -func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) -} -func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_Service.Merge(dst, src) -} -func (m *PluginCapability_Service) XXX_Size() int { - return xxx_messageInfo_PluginCapability_Service.Size(m) -} -func (m *PluginCapability_Service) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo - -func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { - if m != nil { - return m.Type - } - return PluginCapability_Service_UNKNOWN -} - -type ProbeRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } -func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } -func (*ProbeRequest) ProtoMessage() {} -func (*ProbeRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_csi_31237507707d37ec, []int{5} -} -func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) -} -func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) -} -func (dst *ProbeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeRequest.Merge(dst, src) -} -func (m *ProbeRequest) XXX_Size() int { - return xxx_messageInfo_ProbeRequest.Size(m) -} -func (m *ProbeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo - -type ProbeResponse struct { - // Readiness allows a plugin to report its initialization status back - // to the CO. Initialization for some plugins MAY be time consuming - // and it is important for a CO to distinguish between the following - // cases: - // - // 1) The plugin is in an unhealthy state and MAY need restarting. In - // this case a gRPC error code SHALL be returned. - // 2) The plugin is still initializing, but is otherwise perfectly - // healthy. In this case a successful response SHALL be returned - // with a readiness value of `false`. Calls to the plugin's - // Controller and/or Node services MAY fail due to an incomplete - // initialization state. - // 3) The plugin has finished initializing and is ready to service - // calls to its Controller and/or Node services. A successful - // response is returned with a readiness value of `true`. - // - // This field is OPTIONAL. If not present, the caller SHALL assume - // that the plugin is in a ready state and is accepting calls to its - // Controller and/or Node services (according to the plugin's reported - // capabilities). - Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready" json:"ready,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } -func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ProbeResponse) ProtoMessage() {} -func (*ProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{6} -} -func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) -} -func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) -} -func (dst *ProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeResponse.Merge(dst, src) -} -func (m *ProbeResponse) XXX_Size() int { - return xxx_messageInfo_ProbeResponse.Size(m) -} -func (m *ProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo - -func (m *ProbeResponse) GetReady() *wrappers.BoolValue { - if m != nil { - return m.Ready - } - return nil -} - -type CreateVolumeRequest struct { - // The suggested name for the storage space. This field is REQUIRED. - // It serves two purposes: - // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. If `CreateVolume` fails, the volume may or may not - // be provisioned. In this case, the CO may call `CreateVolume` - // again, with the same name, to ensure the volume exists. 
The - // Plugin should ensure that multiple `CreateVolume` calls for the - // same name do not result in more than one piece of storage - // provisioned corresponding to that name. If a Plugin is unable to - // enforce idempotency, the CO's error recovery logic could result - // in multiple (unused) volumes being provisioned. - // 2) Suggested name - Some storage systems allow callers to specify - // an identifier by which to refer to the newly provisioned - // storage. If a storage system supports this, it can optionally - // use this name as the identifier for the new volume. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` - // The capabilities that the provisioned volume MUST have: the Plugin - // MUST provision a volume that could satisfy ALL of the - // capabilities specified in this list. The Plugin MUST assume that - // the CO MAY use the provisioned volume later with ANY of the - // capabilities specified in this list. This also enables the CO to do - // early validation: if ANY of the specified volume capabilities are - // not supported by the Plugin, the call SHALL fail. This field is - // REQUIRED. - VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Secrets required by plugin to complete volume creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - ControllerCreateSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_create_secrets,json=controllerCreateSecrets" json:"controller_create_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // If specified, the new volume will be pre-populated with data from - // this source. This field is OPTIONAL. - VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource" json:"volume_content_source,omitempty"` - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume MUST be accessible from. - // An SP SHALL advertise the requirements for topological - // accessibility information in documentation. COs SHALL only specify - // topological accessibility information supported by the SP. - // This field is OPTIONAL. - // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. - // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. 
- AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements" json:"accessibility_requirements,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } -func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeRequest) ProtoMessage() {} -func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{7} -} -func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) -} -func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) -} -func (m *CreateVolumeRequest) XXX_Size() int { - return xxx_messageInfo_CreateVolumeRequest.Size(m) -} -func (m *CreateVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo - -func (m *CreateVolumeRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { - if m != nil { - return m.CapacityRange - } - return nil -} - -func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities - } - return nil -} - -func (m *CreateVolumeRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *CreateVolumeRequest) GetControllerCreateSecrets() map[string]string { - if m != nil { - return m.ControllerCreateSecrets - } - return nil -} - -func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { - if m != nil { - return m.VolumeContentSource - } - return nil -} - -func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { - if m != nil { - return m.AccessibilityRequirements - } - return nil -} - -// Specifies what source the volume will be created from. One of the -// type fields MUST be specified. 
-type VolumeContentSource struct { - // Types that are valid to be assigned to Type: - // *VolumeContentSource_Snapshot - Type isVolumeContentSource_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } -func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource) ProtoMessage() {} -func (*VolumeContentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{8} -} -func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) -} -func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) -} -func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource.Merge(dst, src) -} -func (m *VolumeContentSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource.Size(m) -} -func (m *VolumeContentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo - -type isVolumeContentSource_Type interface { - isVolumeContentSource_Type() -} - -type VolumeContentSource_Snapshot struct { - Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,oneof"` -} - -func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} - -func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { - if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { - return x.Snapshot - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ - (*VolumeContentSource_Snapshot)(nil), - } -} - -func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Snapshot); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) - } - return nil -} - -func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeContentSource) - switch tag { - case 1: // type.snapshot - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeContentSource_SnapshotSource) - err := b.DecodeMessage(msg) - m.Type = &VolumeContentSource_Snapshot{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - s := proto.Size(x.Snapshot) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type VolumeContentSource_SnapshotSource struct { - // Contains identity information for the existing source snapshot. - // This field is REQUIRED. Plugin is REQUIRED to support creating - // volume from snapshot if it supports the capability - // CREATE_DELETE_SNAPSHOT. 
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } -func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} -func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{8, 0} -} -func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) -} -func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) -} -func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo - -func (m *VolumeContentSource_SnapshotSource) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type CreateVolumeResponse struct { - // Contains all attributes of the newly created volume that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the volume. This field is REQUIRED. - Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } -func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeResponse) ProtoMessage() {} -func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{9} -} -func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) -} -func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) -} -func (m *CreateVolumeResponse) XXX_Size() int { - return xxx_messageInfo_CreateVolumeResponse.Size(m) -} -func (m *CreateVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo - -func (m *CreateVolumeResponse) GetVolume() *Volume { - if m != nil { - return m.Volume - } - return nil -} - -// Specify a capability of a volume. -type VolumeCapability struct { - // Specifies what API the volume will be accessed using. One of the - // following fields MUST be specified. - // - // Types that are valid to be assigned to AccessType: - // *VolumeCapability_Block - // *VolumeCapability_Mount - AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` - // This is a REQUIRED field. 
- AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } -func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability) ProtoMessage() {} -func (*VolumeCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10} -} -func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) -} -func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability.Merge(dst, src) -} -func (m *VolumeCapability) XXX_Size() int { - return xxx_messageInfo_VolumeCapability.Size(m) -} -func (m *VolumeCapability) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo - -type isVolumeCapability_AccessType interface { - isVolumeCapability_AccessType() -} - -type VolumeCapability_Block struct { - Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` -} -type VolumeCapability_Mount struct { - Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` -} - -func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} -func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} - -func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { - if m != nil { - return m.AccessType - } - return nil -} - -func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { - return x.Block - } - return nil -} - -func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { - return x.Mount - } - return nil -} - -func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { - if m != nil { - return m.AccessMode - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ - (*VolumeCapability_Block)(nil), - (*VolumeCapability_Mount)(nil), - } -} - -func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Block); err != nil { - return err - } - case *VolumeCapability_Mount: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mount); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) - } - return nil -} - -func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeCapability) - switch tag { - case 1: // access_type.block - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_BlockVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Block{msg} - return true, err - case 2: // access_type.mount - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_MountVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Mount{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - s := proto.Size(x.Block) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *VolumeCapability_Mount: - s := proto.Size(x.Mount) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Indicate that the volume will be accessed via the block device API. 
-type VolumeCapability_BlockVolume struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } -func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_BlockVolume) ProtoMessage() {} -func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 0} -} -func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) -} -func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) -} -func (m *VolumeCapability_BlockVolume) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) -} -func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo - -// Indicate that the volume will be accessed via the filesystem API. -type VolumeCapability_MountVolume struct { - // The filesystem type. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` - // The mount options that can be used for the volume. This field is - // OPTIONAL. `mount_flags` MAY contain sensitive information. - // Therefore, the CO and the Plugin MUST NOT leak this information - // to untrusted entities. The total size of this repeated field - // SHALL NOT exceed 4 KiB. 
- MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } -func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_MountVolume) ProtoMessage() {} -func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 1} -} -func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) -} -func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src) -} -func (m *VolumeCapability_MountVolume) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) -} -func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo - -func (m *VolumeCapability_MountVolume) GetFsType() string { - if m != nil { - return m.FsType - } - return "" -} - -func (m *VolumeCapability_MountVolume) GetMountFlags() []string { - if m != nil { - return m.MountFlags - } - return nil -} - -// Specify how a volume can be accessed. -type VolumeCapability_AccessMode struct { - // This field is REQUIRED. - Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.v0.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } -func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_AccessMode) ProtoMessage() {} -func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 2} -} -func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) -} -func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src) -} -func (m *VolumeCapability_AccessMode) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) -} -func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo - -func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { - if m != nil { - return m.Mode - } - return VolumeCapability_AccessMode_UNKNOWN -} - -// The capacity of the storage space in bytes. To specify an exact size, -// `required_bytes` and `limit_bytes` SHALL be set to the same value. At -// least one of the these fields MUST be specified. 
-type CapacityRange struct { - // Volume MUST be at least this big. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` - // Volume MUST not be bigger than this. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CapacityRange) Reset() { *m = CapacityRange{} } -func (m *CapacityRange) String() string { return proto.CompactTextString(m) } -func (*CapacityRange) ProtoMessage() {} -func (*CapacityRange) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{11} -} -func (m *CapacityRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CapacityRange.Unmarshal(m, b) -} -func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) -} -func (dst *CapacityRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_CapacityRange.Merge(dst, src) -} -func (m *CapacityRange) XXX_Size() int { - return xxx_messageInfo_CapacityRange.Size(m) -} -func (m *CapacityRange) XXX_DiscardUnknown() { - xxx_messageInfo_CapacityRange.DiscardUnknown(m) -} - -var xxx_messageInfo_CapacityRange proto.InternalMessageInfo - -func (m *CapacityRange) GetRequiredBytes() int64 { - if m != nil { - return m.RequiredBytes - } - return 0 -} - -func (m *CapacityRange) GetLimitBytes() int64 { - if m != nil { - return m.LimitBytes - } - return 0 -} - -// The information about a provisioned volume. -type Volume struct { - // The capacity of the volume in bytes. This field is OPTIONAL. If not - // set (value of 0), it indicates that the capacity of the volume is - // unknown (e.g., NFS share). - // The value of this field MUST NOT be negative. - CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // If specified, indicates that the volume is not empty and is - // pre-populated with data from the specified source. - // This field is OPTIONAL. 
- ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource" json:"content_source,omitempty"` - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume is accessible from. - // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. - // An SP MAY specify multiple topologies to indicate the volume is - // accessible from multiple locations. - // COs MAY use this information along with the topology information - // returned by NodeGetInfo to ensure that a given volume is accessible - // from a given node when scheduling workloads. - // This field is OPTIONAL. If it is not specified, the CO MAY assume - // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available - // node. - // - // Example 1: - // accessible_topology = {"region": "R1", "zone": "Z2"} - // Indicates a volume accessible only from the "region" "R1" and the - // "zone" "Z2". - // - // Example 2: - // accessible_topology = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" - // in the "region" "R1". - AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Volume) Reset() { *m = Volume{} } -func (m *Volume) String() string { return proto.CompactTextString(m) } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{12} -} -func (m *Volume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Volume.Unmarshal(m, b) -} -func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Volume.Marshal(b, m, deterministic) -} -func (dst *Volume) XXX_Merge(src proto.Message) { - xxx_messageInfo_Volume.Merge(dst, src) -} -func (m *Volume) XXX_Size() int { - return xxx_messageInfo_Volume.Size(m) -} -func (m *Volume) XXX_DiscardUnknown() { - xxx_messageInfo_Volume.DiscardUnknown(m) -} - -var xxx_messageInfo_Volume proto.InternalMessageInfo - -func (m *Volume) GetCapacityBytes() int64 { - if m != nil { - return m.CapacityBytes - } - return 0 -} - -func (m *Volume) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Volume) GetAttributes() map[string]string { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Volume) GetContentSource() *VolumeContentSource { - if m != nil { - return m.ContentSource - } - return nil -} - -func (m *Volume) GetAccessibleTopology() []*Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -type TopologyRequirement struct { - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. 
The SP MUST ensure x >= 1
- // If x==n, than the SP MUST make the provisioned volume available to
- // all topologies from the list of requisite topologies. If it is
- // unable to do so, the SP MUST fail the CreateVolume call.
- // For example, if a volume should be accessible from a single zone,
- // and requisite =
- //   {"region": "R1", "zone": "Z2"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and the "zone" "Z2".
- // Similarly, if a volume should be accessible from two zones, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"},
- //   {"region": "R1", "zone": "Z3"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and both "zone" "Z2" and "zone" "Z3".
- //
- // If x>n, than the SP MUST make the provisioned volume available from
- // all topologies from the list of requisite topologies and MAY choose
- // the remaining x-n unique topologies from the list of all possible
- // topologies. If it is unable to do so, the SP MUST fail the
- // CreateVolume call.
- // For example, if a volume should be accessible from two zones, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and the "zone" "Z2" and the SP may select the second zone
- // independently, e.g. "R1/Z4".
- Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite" json:"requisite,omitempty"`
- // Specifies the list of topologies the CO would prefer the volume to
- // be provisioned in.
- //
- // This field is OPTIONAL. If TopologyRequirement is specified either
- // requisite or preferred or both MUST be specified.
- //
- // An SP MUST attempt to make the provisioned volume available using
- // the preferred topologies in order from first to last.
- //
- // If requisite is specified, all topologies in preferred list MUST
- // also be present in the list of requisite topologies.
- //
- // If the SP is unable to to make the provisioned volume available
- // from any of the preferred topologies, the SP MAY choose a topology
- // from the list of requisite topologies.
- // If the list of requisite topologies is not specified, then the SP
- // MAY choose from the list of all possible topologies.
- // If the list of requisite topologies is specified and the SP is
- // unable to to make the provisioned volume available from any of the
- // requisite topologies it MUST fail the CreateVolume call.
- //
- // Example 1:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"},
- //   {"region": "R1", "zone": "Z3"}
- // preferred =
- //   {"region": "R1", "zone": "Z3"}
- // then the the SP SHOULD first attempt to make the provisioned volume
- // available from "zone" "Z3" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible.
- //
- // Example 2:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"},
- //   {"region": "R1", "zone": "Z3"},
- //   {"region": "R1", "zone": "Z4"},
- //   {"region": "R1", "zone": "Z5"}
- // preferred =
- //   {"region": "R1", "zone": "Z4"},
- //   {"region": "R1", "zone": "Z2"}
- // then the the SP SHOULD first attempt to make the provisioned volume
- // accessible from "zone" "Z4" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible. If that
- // is not possible, the SP may choose between either the "zone"
- // "Z3" or "Z5" in the "region" "R1".
- // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred" json:"preferred,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } -func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } -func (*TopologyRequirement) ProtoMessage() {} -func (*TopologyRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{13} -} -func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) -} -func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) -} -func (dst *TopologyRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologyRequirement.Merge(dst, src) -} -func (m *TopologyRequirement) XXX_Size() int { - return xxx_messageInfo_TopologyRequirement.Size(m) -} -func (m *TopologyRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_TopologyRequirement.DiscardUnknown(m) -} - -var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo - -func (m *TopologyRequirement) GetRequisite() []*Topology { - if m != nil { - return m.Requisite - } - return nil -} - -func (m *TopologyRequirement) GetPreferred() []*Topology { - if m != nil { - return m.Preferred - } - return nil -} - -// Topology is a map of topological domains to topological segments. -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". -// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). 
-// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -type Topology struct { - Segments map[string]string `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Topology) Reset() { *m = Topology{} } -func (m *Topology) String() string { return proto.CompactTextString(m) } -func (*Topology) ProtoMessage() {} -func (*Topology) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{14} -} -func (m *Topology) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Topology.Unmarshal(m, b) -} -func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Topology.Marshal(b, m, deterministic) -} -func (dst *Topology) XXX_Merge(src proto.Message) { - xxx_messageInfo_Topology.Merge(dst, src) -} -func (m *Topology) XXX_Size() int { - return xxx_messageInfo_Topology.Size(m) -} -func (m *Topology) XXX_DiscardUnknown() { - xxx_messageInfo_Topology.DiscardUnknown(m) -} - -var xxx_messageInfo_Topology proto.InternalMessageInfo - -func (m *Topology) GetSegments() map[string]string { - if m != nil { - return m.Segments - } - return nil -} - -type DeleteVolumeRequest struct { - // The ID of the volume to be deprovisioned. - // This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // Secrets required by plugin to complete volume deletion request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. 
- ControllerDeleteSecrets map[string]string `protobuf:"bytes,2,rep,name=controller_delete_secrets,json=controllerDeleteSecrets" json:"controller_delete_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } -func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeRequest) ProtoMessage() {} -func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{15} -} -func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) -} -func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) -} -func (m *DeleteVolumeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeRequest.Size(m) -} -func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo - -func (m *DeleteVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *DeleteVolumeRequest) GetControllerDeleteSecrets() map[string]string { - if m != nil { - return m.ControllerDeleteSecrets - } - return nil -} - -type DeleteVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } -func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeResponse) ProtoMessage() {} -func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{16} -} -func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) -} -func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) -} -func (m *DeleteVolumeResponse) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeResponse.Size(m) -} -func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo - -type ControllerPublishVolumeRequest struct { - // The ID of the volume to be used on a node. - // This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The ID of the node. This field is REQUIRED. The CO SHALL set this - // field to match the node ID returned by `NodeGetInfo`. - NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. 
- VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. - Readonly bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` - // Secrets required by plugin to complete controller publish volume - // request. This field is OPTIONAL. Refer to the - // `Secrets Requirements` section on how to use this field. - ControllerPublishSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_publish_secrets,json=controllerPublishSecrets" json:"controller_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } -func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeRequest) ProtoMessage() {} -func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{17} -} -func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src) -} -func (m *ControllerPublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) -} -func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo - -func (m *ControllerPublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ControllerPublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *ControllerPublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *ControllerPublishVolumeRequest) GetControllerPublishSecrets() map[string]string { - if m != nil { - return m.ControllerPublishSecrets - } - return nil -} - -func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type ControllerPublishVolumeResponse struct { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. 
- PublishInfo map[string]string `protobuf:"bytes,1,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } -func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeResponse) ProtoMessage() {} -func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{18} -} -func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) -} -func (m *ControllerPublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) -} -func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo - -func (m *ControllerPublishVolumeResponse) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -type ControllerUnpublishVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The ID of the node. This field is OPTIONAL. The CO SHOULD set this - // field to match the node ID returned by `NodeGetInfo` or leave it - // unset. If the value is set, the SP MUST unpublish the volume from - // the specified node. If the value is unset, the SP MUST unpublish - // the volume from all nodes it is published to. - NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - // Secrets required by plugin to complete controller unpublish volume - // request. This SHOULD be the same secrets passed to the - // ControllerPublishVolume call for the specified volume. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. 
- ControllerUnpublishSecrets map[string]string `protobuf:"bytes,3,rep,name=controller_unpublish_secrets,json=controllerUnpublishSecrets" json:"controller_unpublish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } -func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} -func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{19} -} -func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) -} -func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo - -func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *ControllerUnpublishVolumeRequest) GetControllerUnpublishSecrets() map[string]string { - if m != nil { - return m.ControllerUnpublishSecrets - } - return nil -} - -type ControllerUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } -func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} -func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{20} -} -func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) -} -func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo - -type ValidateVolumeCapabilitiesRequest struct { - // The ID of the volume to check. This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities - // specified below are supported. This field is REQUIRED. - VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - VolumeAttributes map[string]string `protobuf:"bytes,3,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - AccessibleTopology []*Topology `protobuf:"bytes,4,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } -func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{21} -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities - } - return nil -} - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -func (m *ValidateVolumeCapabilitiesRequest) GetAccessibleTopology() []*Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -type ValidateVolumeCapabilitiesResponse struct { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. 
- Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` - // Message to the CO if `supported` above is false. This field is - // OPTIONAL. - // An empty string is equal to an unspecified field value. - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } -func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{22} -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool { - if m != nil { - return m.Supported - } - return false -} - -func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -type ListVolumesRequest struct { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListVolumes` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListVolumes` call to get the - // next page of entries. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. 
- StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } -func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } -func (*ListVolumesRequest) ProtoMessage() {} -func (*ListVolumesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{23} -} -func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) -} -func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) -} -func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesRequest.Merge(dst, src) -} -func (m *ListVolumesRequest) XXX_Size() int { - return xxx_messageInfo_ListVolumesRequest.Size(m) -} -func (m *ListVolumesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo - -func (m *ListVolumesRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries - } - return 0 -} - -func (m *ListVolumesRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken - } - return "" -} - -type ListVolumesResponse struct { - Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - // This token allows you to get the next page of entries for - // `ListVolumes` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListVolumes` request. This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. 
- NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } -func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse) ProtoMessage() {} -func (*ListVolumesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{24} -} -func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) -} -func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) -} -func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse.Merge(dst, src) -} -func (m *ListVolumesResponse) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse.Size(m) -} -func (m *ListVolumesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo - -func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *ListVolumesResponse) GetNextToken() string { - if m != nil { - return m.NextToken - } - return "" -} - -type ListVolumesResponse_Entry struct { - Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } -func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse_Entry) ProtoMessage() {} -func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{24, 0} -} -func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) -} -func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) -} -func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) -} -func (m *ListVolumesResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) -} -func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo - -func (m *ListVolumesResponse_Entry) GetVolume() *Volume { - if m != nil { - return m.Volume - } - return nil -} - -type GetCapacityRequest struct { - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes that satisfy ALL of the - // specified `volume_capabilities`. These are the same - // `volume_capabilities` the CO will use in `CreateVolumeRequest`. - // This field is OPTIONAL. 
- VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes with the given Plugin - // specific `parameters`. These are the same `parameters` the CO will - // use in `CreateVolumeRequest`. This field is OPTIONAL. - Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes that in the specified - // `accessible_topology`. This is the same as the - // `accessible_topology` the CO returns in a `CreateVolumeResponse`. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } -func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } -func (*GetCapacityRequest) ProtoMessage() {} -func (*GetCapacityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{25} -} -func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) -} -func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) -} -func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityRequest.Merge(dst, src) -} -func (m *GetCapacityRequest) XXX_Size() int { - return xxx_messageInfo_GetCapacityRequest.Size(m) -} -func (m *GetCapacityRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo - -func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities - } - return nil -} - -func (m *GetCapacityRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -type GetCapacityResponse struct { - // The available capacity, in bytes, of the storage that can be used - // to provision volumes. If `volume_capabilities` or `parameters` is - // specified in the request, the Plugin SHALL take those into - // consideration when calculating the available capacity of the - // storage. This field is REQUIRED. - // The value of this field MUST NOT be negative. 
- AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } -func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } -func (*GetCapacityResponse) ProtoMessage() {} -func (*GetCapacityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{26} -} -func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) -} -func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) -} -func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityResponse.Merge(dst, src) -} -func (m *GetCapacityResponse) XXX_Size() int { - return xxx_messageInfo_GetCapacityResponse.Size(m) -} -func (m *GetCapacityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo - -func (m *GetCapacityResponse) GetAvailableCapacity() int64 { - if m != nil { - return m.AvailableCapacity - } - return 0 -} - -type ControllerGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } -func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} -func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{27} -} -func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) -} -func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo - -type ControllerGetCapabilitiesResponse struct { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. 
- Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } -func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} -func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{28} -} -func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) -} -func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo - -func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the controller service. -type ControllerServiceCapability struct { - // Types that are valid to be assigned to Type: - // *ControllerServiceCapability_Rpc - Type isControllerServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } -func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability) ProtoMessage() {} -func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29} -} -func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) -} -func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) -} -func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) -} -func (m *ControllerServiceCapability) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability.Size(m) -} -func (m *ControllerServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo - -type isControllerServiceCapability_Type interface { - isControllerServiceCapability_Type() -} - -type ControllerServiceCapability_Rpc struct { - Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` -} - -func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} - -func (m *ControllerServiceCapability) GetType() 
isControllerServiceCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { - if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { - return x.Rpc - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ - (*ControllerServiceCapability_Rpc)(nil), - } -} - -func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Rpc); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) - } - return nil -} - -func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ControllerServiceCapability) - switch tag { - case 1: // type.rpc - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ControllerServiceCapability_RPC) - err := b.DecodeMessage(msg) - m.Type = &ControllerServiceCapability_Rpc{msg} - return true, err - default: - return false, nil - } -} - -func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - s := proto.Size(x.Rpc) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ControllerServiceCapability_RPC struct { - Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } -func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability_RPC) ProtoMessage() {} -func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29, 0} -} -func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) -} -func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) -} -func (m *ControllerServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) -} -func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo - -func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { - if m != nil { - return m.Type - } - return ControllerServiceCapability_RPC_UNKNOWN -} - -type CreateSnapshotRequest struct { - // The ID of the source volume to be snapshotted. - // This field is REQUIRED. - SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // The suggested name for the snapshot. This field is REQUIRED for - // idempotency. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // Secrets required by plugin to complete snapshot creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - CreateSnapshotSecrets map[string]string `protobuf:"bytes,3,rep,name=create_snapshot_secrets,json=createSnapshotSecrets" json:"create_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - // Use cases for opaque parameters: - // - Specify a policy to automatically clean up the snapshot. - // - Specify an expiration date for the snapshot. - // - Specify whether the snapshot is readonly or read/write. - // - Specify if the snapshot should be replicated to some place. - // - Specify primary or secondary for replication systems that - // support snapshotting only on primary. - Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } -func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotRequest) ProtoMessage() {} -func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{30} -} -func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) -} -func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) -} -func (m *CreateSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotRequest.Size(m) -} -func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo - -func (m *CreateSnapshotRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *CreateSnapshotRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *CreateSnapshotRequest) GetCreateSnapshotSecrets() map[string]string { - if m != nil { - return m.CreateSnapshotSecrets - } - return nil -} - -func (m *CreateSnapshotRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters - } - return nil -} - -type 
CreateSnapshotResponse struct { - // Contains all attributes of the newly created snapshot that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the snapshot. This field is REQUIRED. - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } -func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotResponse) ProtoMessage() {} -func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{31} -} -func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) -} -func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) -} -func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) -} -func (m *CreateSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotResponse.Size(m) -} -func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo - -func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -// The information about a provisioned snapshot. -type Snapshot struct { - // This is the complete size of the snapshot in bytes. The purpose of - // this field is to give CO guidance on how much space is needed to - // create a volume from this snapshot. The size of the volume MUST NOT - // be less than the size of the source snapshot. This field is - // OPTIONAL. If this field is not set, it indicates that this size is - // unknown. The value of this field MUST NOT be negative and a size of - // zero means it is unspecified. - SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes" json:"size_bytes,omitempty"` - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - // Identity information for the source volume. Note that creating a - // snapshot from a snapshot is not supported here so the source has to - // be a volume. This field is REQUIRED. - SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` - // The status of a snapshot. 
- Status *SnapshotStatus `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{32} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Snapshot.Unmarshal(m, b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) -} -func (dst *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(dst, src) -} -func (m *Snapshot) XXX_Size() int { - return xxx_messageInfo_Snapshot.Size(m) -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetSizeBytes() int64 { - if m != nil { - return m.SizeBytes - } - return 0 -} - -func (m *Snapshot) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Snapshot) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *Snapshot) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Snapshot) GetStatus() *SnapshotStatus { - if m != nil { - return m.Status - } - return nil -} - -// The status of a snapshot. -type SnapshotStatus struct { - // This field is REQUIRED. - Type SnapshotStatus_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.SnapshotStatus_Type" json:"type,omitempty"` - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotStatus) Reset() { *m = SnapshotStatus{} } -func (m *SnapshotStatus) String() string { return proto.CompactTextString(m) } -func (*SnapshotStatus) ProtoMessage() {} -func (*SnapshotStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{33} -} -func (m *SnapshotStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SnapshotStatus.Unmarshal(m, b) -} -func (m *SnapshotStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SnapshotStatus.Marshal(b, m, deterministic) -} -func (dst *SnapshotStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotStatus.Merge(dst, src) -} -func (m *SnapshotStatus) XXX_Size() int { - return xxx_messageInfo_SnapshotStatus.Size(m) -} -func (m *SnapshotStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotStatus proto.InternalMessageInfo - -func (m *SnapshotStatus) GetType() SnapshotStatus_Type { - if m != nil { - return m.Type - } - return SnapshotStatus_UNKNOWN -} - -func (m *SnapshotStatus) GetDetails() string { - if m != nil { - return m.Details - } - return "" -} - -type DeleteSnapshotRequest struct { - // The ID of the snapshot to be deleted. - // This field is REQUIRED. 
- SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` - // Secrets required by plugin to complete snapshot deletion request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - DeleteSnapshotSecrets map[string]string `protobuf:"bytes,2,rep,name=delete_snapshot_secrets,json=deleteSnapshotSecrets" json:"delete_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } -func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotRequest) ProtoMessage() {} -func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{34} -} -func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) -} -func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) -} -func (m *DeleteSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotRequest.Size(m) -} -func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo - -func (m *DeleteSnapshotRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId - } - return "" -} - -func (m *DeleteSnapshotRequest) GetDeleteSnapshotSecrets() map[string]string { - if m != nil { - return m.DeleteSnapshotSecrets - } - return nil -} - -type DeleteSnapshotResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } -func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotResponse) ProtoMessage() {} -func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{35} -} -func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) -} -func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) -} -func (m *DeleteSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotResponse.Size(m) -} -func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo - -// List all snapshots on the storage system regardless of how they were -// created. -type ListSnapshotsRequest struct { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. 
If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListSnapshots` call to get the - // next page of entries. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` - // Identity information for the source volume. This field is OPTIONAL. - // It can be used to list snapshots by volume. - SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // Identity information for a specific snapshot. This field is - // OPTIONAL. It can be used to list only a specific snapshot. - // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. - SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } -func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsRequest) ProtoMessage() {} -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{36} -} -func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) -} -func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) -} -func (m *ListSnapshotsRequest) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsRequest.Size(m) -} -func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo - -func (m *ListSnapshotsRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries - } - return 0 -} - -func (m *ListSnapshotsRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken - } - return "" -} - -func (m *ListSnapshotsRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *ListSnapshotsRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId - } - return "" -} - -type ListSnapshotsResponse struct { - Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - // This token allows you to get the next page of entries for - // `ListSnapshots` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListSnapshots` request. 
This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. - NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } -func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse) ProtoMessage() {} -func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{37} -} -func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) -} -func (m *ListSnapshotsResponse) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse.Size(m) -} -func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo - -func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *ListSnapshotsResponse) GetNextToken() string { - if m != nil { - return m.NextToken - } - return "" -} - -type ListSnapshotsResponse_Entry struct { - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } -func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse_Entry) ProtoMessage() {} -func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{37, 0} -} -func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) -} -func (m *ListSnapshotsResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) -} -func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo - -func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type NodeStageVolumeRequest struct { - // The ID of the volume to publish. This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The path to which the volume will be published. It MUST be an - // absolute path in the root filesystem of the process serving this - // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. - // This is a REQUIRED field. - StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. - VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Secrets required by plugin to complete node stage volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - NodeStageSecrets map[string]string `protobuf:"bytes,5,rep,name=node_stage_secrets,json=nodeStageSecrets" json:"node_stage_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. 
- VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } -func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeRequest) ProtoMessage() {} -func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{38} -} -func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) -} -func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) -} -func (m *NodeStageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeRequest.Size(m) -} -func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo - -func (m *NodeStageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeStageVolumeRequest) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *NodeStageVolumeRequest) GetNodeStageSecrets() map[string]string { - if m != nil { - return m.NodeStageSecrets - } - return nil -} - -func (m *NodeStageVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type NodeStageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } -func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeResponse) ProtoMessage() {} -func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{39} -} -func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) -} -func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) -} -func (m *NodeStageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeResponse.Size(m) -} -func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo - -type NodeUnstageVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The path at which the volume was published. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // This is a REQUIRED field. - StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } -func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeRequest) ProtoMessage() {} -func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{40} -} -func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) -} -func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) -} -func (m *NodeUnstageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) -} -func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo - -func (m *NodeUnstageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -type NodeUnstageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } -func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeResponse) ProtoMessage() {} -func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{41} -} -func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) -} -func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) -} -func (m *NodeUnstageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) -} -func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo - -type NodePublishVolumeRequest struct { - // The ID of the volume to publish. This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The path to which the device was mounted by `NodeStageVolume`. - // It MUST be an absolute path in the root filesystem of the process - // serving this request. - // It MUST be set if the Node Plugin implements the - // `STAGE_UNSTAGE_VOLUME` node capability. - // This is an OPTIONAL field. - StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - // The path to which the volume will be published. It MUST be an - // absolute path in the root filesystem of the process serving this - // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. - // This is a REQUIRED field. - TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. - VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. - Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` - // Secrets required by plugin to complete node publish volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - NodePublishSecrets map[string]string `protobuf:"bytes,7,rep,name=node_publish_secrets,json=nodePublishSecrets" json:"node_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. 
- VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } -func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeRequest) ProtoMessage() {} -func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{42} -} -func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) -} -func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) -} -func (m *NodePublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodePublishVolumeRequest.Size(m) -} -func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo - -func (m *NodePublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodePublishVolumeRequest) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -func (m *NodePublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath - } - return "" -} - -func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *NodePublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *NodePublishVolumeRequest) GetNodePublishSecrets() map[string]string { - if m != nil { - return m.NodePublishSecrets - } - return nil -} - -func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type NodePublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } -func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeResponse) ProtoMessage() {} -func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{43} -} -func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) -} -func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) -} -func (m *NodePublishVolumeResponse) XXX_Size() int { - return 
xxx_messageInfo_NodePublishVolumeResponse.Size(m) -} -func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo - -type NodeUnpublishVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The path at which the volume was published. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // This is a REQUIRED field. - TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } -func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeRequest) ProtoMessage() {} -func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{44} -} -func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) -} -func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) -} -func (m *NodeUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) -} -func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo - -func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath - } - return "" -} - -type NodeUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } -func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeResponse) ProtoMessage() {} -func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{45} -} -func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) -} -func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) -} -func (m *NodeUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) -} -func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo - -type 
NodeGetIdRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetIdRequest) Reset() { *m = NodeGetIdRequest{} } -func (m *NodeGetIdRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetIdRequest) ProtoMessage() {} -func (*NodeGetIdRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{46} -} -func (m *NodeGetIdRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetIdRequest.Unmarshal(m, b) -} -func (m *NodeGetIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetIdRequest.Marshal(b, m, deterministic) -} -func (dst *NodeGetIdRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetIdRequest.Merge(dst, src) -} -func (m *NodeGetIdRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetIdRequest.Size(m) -} -func (m *NodeGetIdRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetIdRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetIdRequest proto.InternalMessageInfo - -type NodeGetIdResponse struct { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. - // This is a REQUIRED field. - NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetIdResponse) Reset() { *m = NodeGetIdResponse{} } -func (m *NodeGetIdResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetIdResponse) ProtoMessage() {} -func (*NodeGetIdResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{47} -} -func (m *NodeGetIdResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetIdResponse.Unmarshal(m, b) -} -func (m *NodeGetIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetIdResponse.Marshal(b, m, deterministic) -} -func (dst *NodeGetIdResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetIdResponse.Merge(dst, src) -} -func (m *NodeGetIdResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetIdResponse.Size(m) -} -func (m *NodeGetIdResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetIdResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetIdResponse proto.InternalMessageInfo - -func (m *NodeGetIdResponse) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -type NodeGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } -func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesRequest) ProtoMessage() {} -func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{48} -} -func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) -} -func (m *NodeGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) -} -func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo - -type NodeGetCapabilitiesResponse struct { - // All the capabilities that the node service supports. This field - // is OPTIONAL. - Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } -func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesResponse) ProtoMessage() {} -func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{49} -} -func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) -} -func (m *NodeGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) -} -func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo - -func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the node service. 
-type NodeServiceCapability struct { - // Types that are valid to be assigned to Type: - // *NodeServiceCapability_Rpc - Type isNodeServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } -func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } -func (*NodeServiceCapability) ProtoMessage() {} -func (*NodeServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{50} -} -func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) -} -func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) -} -func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability.Merge(dst, src) -} -func (m *NodeServiceCapability) XXX_Size() int { - return xxx_messageInfo_NodeServiceCapability.Size(m) -} -func (m *NodeServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo - -type isNodeServiceCapability_Type interface { - isNodeServiceCapability_Type() -} - -type NodeServiceCapability_Rpc struct { - Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` -} - -func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} - -func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { - if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { - return x.Rpc - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ - (*NodeServiceCapability_Rpc)(nil), - } -} - -func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*NodeServiceCapability) - // type - switch x := m.Type.(type) { - case *NodeServiceCapability_Rpc: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Rpc); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) - } - return nil -} - -func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*NodeServiceCapability) - switch tag { - case 1: // type.rpc - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NodeServiceCapability_RPC) - err := b.DecodeMessage(msg) - m.Type = &NodeServiceCapability_Rpc{msg} - return true, err - default: - return false, nil - } -} - -func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*NodeServiceCapability) - // type - switch x := m.Type.(type) { - case *NodeServiceCapability_Rpc: - s := proto.Size(x.Rpc) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type NodeServiceCapability_RPC struct { - Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.NodeServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } -func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*NodeServiceCapability_RPC) ProtoMessage() {} -func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{50, 0} -} -func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) -} -func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src) -} -func (m *NodeServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) -} -func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo - -func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { - if m != nil { - return m.Type - } - return NodeServiceCapability_RPC_UNKNOWN -} - -type NodeGetInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } -func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } -func 
(*NodeGetInfoRequest) ProtoMessage() {} -func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{51} -} -func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) -} -func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) -} -func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src) -} -func (m *NodeGetInfoRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetInfoRequest.Size(m) -} -func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo - -type NodeGetInfoResponse struct { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. - NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - // Maximum number of volumes that controller can publish to the node. - // If value is not set or zero CO SHALL decide how many volumes of - // this type can be published by the controller to the node. The - // plugin MUST NOT set negative values here. - // This field is OPTIONAL. - MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode" json:"max_volumes_per_node,omitempty"` - // Specifies where (regions, zones, racks, etc.) the node is - // accessible from. - // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. - // COs MAY use this information along with the topology information - // returned in CreateVolumeResponse to ensure that a given volume is - // accessible from a given node when scheduling workloads. - // This field is OPTIONAL. If it is not specified, the CO MAY assume - // the node is not subject to any topological constraint, and MAY - // schedule workloads that reference any volume V, such that there are - // no topological constraints declared for V. - // - // Example 1: - // accessible_topology = - // {"region": "R1", "zone": "R2"} - // Indicates the node exists within the "region" "R1" and the "zone" - // "Z2". 
- AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } -func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetInfoResponse) ProtoMessage() {} -func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{52} -} -func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) -} -func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) -} -func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src) -} -func (m *NodeGetInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetInfoResponse.Size(m) -} -func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo - -func (m *NodeGetInfoResponse) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { - if m != nil { - return m.MaxVolumesPerNode - } - return 0 -} - -func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -func init() { - proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v0.GetPluginInfoRequest") - proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v0.GetPluginInfoResponse") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetPluginInfoResponse.ManifestEntry") - proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v0.GetPluginCapabilitiesRequest") - proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v0.GetPluginCapabilitiesResponse") - proto.RegisterType((*PluginCapability)(nil), "csi.v0.PluginCapability") - proto.RegisterType((*PluginCapability_Service)(nil), "csi.v0.PluginCapability.Service") - proto.RegisterType((*ProbeRequest)(nil), "csi.v0.ProbeRequest") - proto.RegisterType((*ProbeResponse)(nil), "csi.v0.ProbeResponse") - proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v0.CreateVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ControllerCreateSecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ParametersEntry") - proto.RegisterType((*VolumeContentSource)(nil), "csi.v0.VolumeContentSource") - proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v0.VolumeContentSource.SnapshotSource") - proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v0.CreateVolumeResponse") - proto.RegisterType((*VolumeCapability)(nil), "csi.v0.VolumeCapability") - proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v0.VolumeCapability.BlockVolume") - proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v0.VolumeCapability.MountVolume") - proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v0.VolumeCapability.AccessMode") - proto.RegisterType((*CapacityRange)(nil), "csi.v0.CapacityRange") - proto.RegisterType((*Volume)(nil), "csi.v0.Volume") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.Volume.AttributesEntry") 
- proto.RegisterType((*TopologyRequirement)(nil), "csi.v0.TopologyRequirement") - proto.RegisterType((*Topology)(nil), "csi.v0.Topology") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.Topology.SegmentsEntry") - proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v0.DeleteVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteVolumeRequest.ControllerDeleteSecretsEntry") - proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v0.DeleteVolumeResponse") - proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v0.ControllerPublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.ControllerPublishSecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.VolumeAttributesEntry") - proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v0.ControllerPublishVolumeResponse") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeResponse.PublishInfoEntry") - proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v0.ControllerUnpublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerUnpublishVolumeRequest.ControllerUnpublishSecretsEntry") - proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v0.ControllerUnpublishVolumeResponse") - proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest.VolumeAttributesEntry") - proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v0.ValidateVolumeCapabilitiesResponse") - proto.RegisterType((*ListVolumesRequest)(nil), "csi.v0.ListVolumesRequest") - proto.RegisterType((*ListVolumesResponse)(nil), "csi.v0.ListVolumesResponse") - proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v0.ListVolumesResponse.Entry") - proto.RegisterType((*GetCapacityRequest)(nil), "csi.v0.GetCapacityRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetCapacityRequest.ParametersEntry") - proto.RegisterType((*GetCapacityResponse)(nil), "csi.v0.GetCapacityResponse") - proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v0.ControllerGetCapabilitiesRequest") - proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v0.ControllerGetCapabilitiesResponse") - proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v0.ControllerServiceCapability") - proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v0.ControllerServiceCapability.RPC") - proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v0.CreateSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.CreateSnapshotSecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.ParametersEntry") - proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v0.CreateSnapshotResponse") - proto.RegisterType((*Snapshot)(nil), "csi.v0.Snapshot") - proto.RegisterType((*SnapshotStatus)(nil), "csi.v0.SnapshotStatus") - proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v0.DeleteSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteSnapshotRequest.DeleteSnapshotSecretsEntry") - proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v0.DeleteSnapshotResponse") - proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v0.ListSnapshotsRequest") - proto.RegisterType((*ListSnapshotsResponse)(nil), 
"csi.v0.ListSnapshotsResponse") - proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v0.ListSnapshotsResponse.Entry") - proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v0.NodeStageVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.NodeStageSecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.PublishInfoEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.VolumeAttributesEntry") - proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v0.NodeStageVolumeResponse") - proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v0.NodeUnstageVolumeRequest") - proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v0.NodeUnstageVolumeResponse") - proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v0.NodePublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.NodePublishSecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.PublishInfoEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.VolumeAttributesEntry") - proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v0.NodePublishVolumeResponse") - proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v0.NodeUnpublishVolumeRequest") - proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v0.NodeUnpublishVolumeResponse") - proto.RegisterType((*NodeGetIdRequest)(nil), "csi.v0.NodeGetIdRequest") - proto.RegisterType((*NodeGetIdResponse)(nil), "csi.v0.NodeGetIdResponse") - proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v0.NodeGetCapabilitiesRequest") - proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v0.NodeGetCapabilitiesResponse") - proto.RegisterType((*NodeServiceCapability)(nil), "csi.v0.NodeServiceCapability") - proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v0.NodeServiceCapability.RPC") - proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v0.NodeGetInfoRequest") - proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v0.NodeGetInfoResponse") - proto.RegisterEnum("csi.v0.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) - proto.RegisterEnum("csi.v0.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) - proto.RegisterEnum("csi.v0.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) - proto.RegisterEnum("csi.v0.SnapshotStatus_Type", SnapshotStatus_Type_name, SnapshotStatus_Type_value) - proto.RegisterEnum("csi.v0.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for Identity service - -type IdentityClient interface { - GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) - GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) - Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) -} - -type identityClient struct { - cc *grpc.ClientConn -} - -func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { - return &identityClient{cc} -} - -func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { - out := new(GetPluginInfoResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginInfo", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { - out := new(GetPluginCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { - out := new(ProbeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/Probe", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Identity service - -type IdentityServer interface { - GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) - GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) - Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) -} - -func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { - s.RegisterService(&_Identity_serviceDesc, srv) -} - -func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/GetPluginInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/GetPluginCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProbeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).Probe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/Probe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Identity_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Identity", - HandlerType: (*IdentityServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPluginInfo", - Handler: _Identity_GetPluginInfo_Handler, - }, - { - MethodName: "GetPluginCapabilities", - Handler: _Identity_GetPluginCapabilities_Handler, - }, - { - MethodName: "Probe", - Handler: _Identity_Probe_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Controller service - -type ControllerClient interface { - CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) - DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) - ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) - GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) - ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) - DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) - ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) -} - -type controllerClient struct { - cc *grpc.ClientConn -} - -func NewControllerClient(cc *grpc.ClientConn) ControllerClient { - return &controllerClient{cc} -} - -func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { - out := new(CreateVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { - out := new(DeleteVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteVolume", in, out, c.cc, opts...) 
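On the serving side, RegisterIdentityServer above wires an implementation of IdentityServer into a grpc.Server. The following is a toy sketch with a made-up driver name, version, and socket path, not code from the vendored file.

package main

import (
	"context"
	"net"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path for the v0 bindings
	"google.golang.org/grpc"
)

// identity is a minimal IdentityServer: it reports a fixed name and version,
// advertises no optional plugin capabilities, and always answers Probe.
type identity struct{}

func (identity) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
	return &csi.GetPluginInfoResponse{Name: "example.csi.driver", VendorVersion: "0.3.0"}, nil
}

func (identity) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
	return &csi.GetPluginCapabilitiesResponse{}, nil
}

func (identity) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
	return &csi.ProbeResponse{}, nil
}

func main() {
	lis, err := net.Listen("unix", "/tmp/example-csi.sock") // hypothetical socket
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	csi.RegisterIdentityServer(srv, identity{})
	_ = srv.Serve(lis)
}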
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { - out := new(ControllerPublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerPublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { - out := new(ControllerUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { - out := new(ValidateVolumeCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { - out := new(ListVolumesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ListVolumes", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { - out := new(GetCapacityResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/GetCapacity", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { - out := new(ControllerGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { - out := new(CreateSnapshotResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateSnapshot", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { - out := new(DeleteSnapshotResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteSnapshot", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { - out := new(ListSnapshotsResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ListSnapshots", in, out, c.cc, opts...) 
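The Controller client methods above all follow the same invoke-and-return pattern. As a hedged sketch, this is roughly what a caller such as a provisioning sidecar might send through CreateVolume; the volume name, size, and access mode are illustrative values, and the import path is an assumption.

package example

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path for the v0 bindings
	"google.golang.org/grpc"
)

// provisionOnce asks the plugin for a 1 GiB mountable volume writable by a
// single node and returns the plugin-assigned volume ID.
func provisionOnce(ctx context.Context, cc *grpc.ClientConn) (string, error) {
	client := csi.NewControllerClient(cc)
	resp, err := client.CreateVolume(ctx, &csi.CreateVolumeRequest{
		Name: "pvc-1234", // hypothetical idempotency name
		CapacityRange: &csi.CapacityRange{
			RequiredBytes: 1 << 30,
		},
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{}},
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
			},
		}},
	})
	if err != nil {
		return "", err
	}
	return resp.GetVolume().GetId(), nil
}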
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Controller service - -type ControllerServer interface { - CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) - DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) - ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) - GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) - ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) - DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) - ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) -} - -func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { - s.RegisterService(&_Controller_serviceDesc, srv) -} - -func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/CreateVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/DeleteVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerPublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerPublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerPublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateVolumeCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ValidateVolumeCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListVolumesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListVolumes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ListVolumes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetCapacityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).GetCapacity(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/GetCapacity", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(CreateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/CreateSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/DeleteSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSnapshotsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ListSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Controller_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Controller", - HandlerType: (*ControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateVolume", - Handler: _Controller_CreateVolume_Handler, - }, - { - MethodName: "DeleteVolume", - Handler: _Controller_DeleteVolume_Handler, - }, - { - MethodName: "ControllerPublishVolume", - Handler: _Controller_ControllerPublishVolume_Handler, - }, - { - MethodName: "ControllerUnpublishVolume", - Handler: _Controller_ControllerUnpublishVolume_Handler, - }, - { - MethodName: "ValidateVolumeCapabilities", - Handler: _Controller_ValidateVolumeCapabilities_Handler, - }, - { - MethodName: "ListVolumes", - Handler: _Controller_ListVolumes_Handler, - }, - { - MethodName: "GetCapacity", - Handler: _Controller_GetCapacity_Handler, - }, - { - MethodName: "ControllerGetCapabilities", - Handler: _Controller_ControllerGetCapabilities_Handler, - }, - { - MethodName: "CreateSnapshot", - Handler: _Controller_CreateSnapshot_Handler, - }, - { - MethodName: "DeleteSnapshot", - Handler: _Controller_DeleteSnapshot_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: _Controller_ListSnapshots_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Node service - -type NodeClient interface { - NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(ctx 
context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) - NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. - NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) -} - -type nodeClient struct { - cc *grpc.ClientConn -} - -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { - out := new(NodeStageVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeStageVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { - out := new(NodeUnstageVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnstageVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { - out := new(NodePublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodePublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { - out := new(NodeUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. -func (c *nodeClient) NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) { - out := new(NodeGetIdResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetId", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { - out := new(NodeGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { - out := new(NodeGetInfoResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetInfo", in, out, c.cc, opts...) 
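The interface comments above spell out the NodeGetId deprecation: v0 callers are expected to prefer NodeGetInfo while still tolerating drivers that only answer NodeGetId. A sketch of that fallback, under the same import-path assumption as the earlier examples:

package example

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path for the v0 bindings
	"google.golang.org/grpc"
)

// nodeID prefers NodeGetInfo and only falls back to the deprecated NodeGetId
// when the newer RPC is unavailable or fails.
func nodeID(ctx context.Context, cc *grpc.ClientConn) (string, error) {
	nc := csi.NewNodeClient(cc)
	if info, err := nc.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{}); err == nil {
		return info.GetNodeId(), nil
	}
	// Pre-1.0 drivers may only implement NodeGetId.
	resp, err := nc.NodeGetId(ctx, &csi.NodeGetIdRequest{})
	if err != nil {
		return "", err
	}
	return resp.GetNodeId(), nil
}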
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Node service - -type NodeServer interface { - NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - NodeGetId(context.Context, *NodeGetIdRequest) (*NodeGetIdResponse, error) - NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. - NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) -} - -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) -} - -func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeStageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeStageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeStageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnstageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnstageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeUnstageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodePublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnpublishVolume(ctx, 
in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetIdRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetId(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetId", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetId(ctx, req.(*NodeGetIdRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeStageVolume", - Handler: _Node_NodeStageVolume_Handler, - }, - { - MethodName: "NodeUnstageVolume", - Handler: _Node_NodeUnstageVolume_Handler, - }, - { - MethodName: "NodePublishVolume", - Handler: _Node_NodePublishVolume_Handler, - }, - { - MethodName: "NodeUnpublishVolume", - Handler: _Node_NodeUnpublishVolume_Handler, - }, - { - MethodName: "NodeGetId", - Handler: _Node_NodeGetId_Handler, - }, - { - MethodName: "NodeGetCapabilities", - Handler: _Node_NodeGetCapabilities_Handler, - }, - { - MethodName: "NodeGetInfo", - Handler: _Node_NodeGetInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -func init() { proto.RegisterFile("csi.proto", fileDescriptor_csi_31237507707d37ec) } - -var fileDescriptor_csi_31237507707d37ec = []byte{ - // 2932 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x73, 0x23, 0x47, - 0xd5, 0xa3, 0x0f, 0xdb, 0x7a, 0x5e, 0x3b, 0xda, 0xf6, 0x97, 0x3c, 0xb6, 0x77, 0xbd, 0xb3, 0xd9, - 0x64, 0x13, 0x12, 0x6d, 0x30, 0x24, 0x15, 0x92, 0x4d, 0x40, 0x96, 0x15, 0x5b, 0x59, 0x5b, 0x36, - 0x23, 0xd9, 0xa9, 0x5d, 0x42, 
0x4d, 0xc6, 0x52, 0x5b, 0x3b, 0xac, 0x3c, 0xa3, 0xcc, 0x8c, 0xcc, - 0x9a, 0x1b, 0x70, 0x01, 0x4e, 0xf0, 0x0b, 0x52, 0x95, 0x1b, 0x14, 0xb9, 0x50, 0xdc, 0xa8, 0xe2, - 0x46, 0x15, 0x27, 0xce, 0x9c, 0xb8, 0xa7, 0xe0, 0xc8, 0x89, 0x2a, 0xaa, 0xa8, 0x9e, 0xee, 0x19, - 0x4d, 0xb7, 0x7a, 0xf4, 0x91, 0xdd, 0x4a, 0x71, 0x92, 0xe6, 0x7d, 0xf5, 0xeb, 0xd7, 0xef, 0xbd, - 0x7e, 0xef, 0xcd, 0x40, 0xae, 0xe9, 0x59, 0xc5, 0xae, 0xeb, 0xf8, 0x0e, 0x9a, 0x26, 0x7f, 0x2f, - 0xdf, 0x50, 0x6f, 0xb4, 0x1d, 0xa7, 0xdd, 0xc1, 0xf7, 0x02, 0xe8, 0x59, 0xef, 0xfc, 0xde, 0x8f, - 0x5d, 0xb3, 0xdb, 0xc5, 0xae, 0x47, 0xe9, 0xb4, 0x15, 0x58, 0xda, 0xc3, 0xfe, 0x71, 0xa7, 0xd7, - 0xb6, 0xec, 0xaa, 0x7d, 0xee, 0xe8, 0xf8, 0xd3, 0x1e, 0xf6, 0x7c, 0xed, 0xef, 0x0a, 0x2c, 0x0b, - 0x08, 0xaf, 0xeb, 0xd8, 0x1e, 0x46, 0x08, 0x32, 0xb6, 0x79, 0x81, 0x0b, 0xca, 0x96, 0x72, 0x37, - 0xa7, 0x07, 0xff, 0xd1, 0x1d, 0x58, 0xb8, 0xc4, 0x76, 0xcb, 0x71, 0x8d, 0x4b, 0xec, 0x7a, 0x96, - 0x63, 0x17, 0x52, 0x01, 0x76, 0x9e, 0x42, 0x4f, 0x29, 0x10, 0xed, 0xc1, 0xec, 0x85, 0x69, 0x5b, - 0xe7, 0xd8, 0xf3, 0x0b, 0xe9, 0xad, 0xf4, 0xdd, 0xb9, 0xed, 0x6f, 0x14, 0xa9, 0x9e, 0x45, 0xe9, - 0x5a, 0xc5, 0x43, 0x46, 0x5d, 0xb1, 0x7d, 0xf7, 0x4a, 0x8f, 0x98, 0xd5, 0x77, 0x61, 0x9e, 0x43, - 0xa1, 0x3c, 0xa4, 0x9f, 0xe0, 0x2b, 0xa6, 0x13, 0xf9, 0x8b, 0x96, 0x20, 0x7b, 0x69, 0x76, 0x7a, - 0x98, 0x69, 0x42, 0x1f, 0xde, 0x49, 0xbd, 0xad, 0x68, 0x37, 0x60, 0x23, 0x5a, 0xad, 0x6c, 0x76, - 0xcd, 0x33, 0xab, 0x63, 0xf9, 0x16, 0xf6, 0xc2, 0xad, 0xff, 0x10, 0x36, 0x13, 0xf0, 0xcc, 0x02, - 0xf7, 0xe1, 0x5a, 0x33, 0x06, 0x2f, 0xa4, 0x82, 0xad, 0x14, 0xc2, 0xad, 0x08, 0x9c, 0x57, 0x3a, - 0x47, 0xad, 0xfd, 0x53, 0x81, 0xbc, 0x48, 0x82, 0xee, 0xc3, 0x8c, 0x87, 0xdd, 0x4b, 0xab, 0x49, - 0xed, 0x3a, 0xb7, 0xbd, 0x95, 0x24, 0xad, 0x58, 0xa7, 0x74, 0xfb, 0x53, 0x7a, 0xc8, 0xa2, 0xfe, - 0x5a, 0x81, 0x19, 0x06, 0x46, 0xdf, 0x81, 0x8c, 0x7f, 0xd5, 0xa5, 0x62, 0x16, 0xb6, 0xef, 0x8c, - 0x12, 0x53, 0x6c, 0x5c, 0x75, 0xb1, 0x1e, 0xb0, 0x68, 0x1f, 0x42, 0x86, 0x3c, 0xa1, 0x39, 0x98, - 0x39, 0xa9, 0x3d, 0xa8, 0x1d, 0x7d, 0x54, 0xcb, 0x4f, 0xa1, 0x15, 0x40, 0xe5, 0xa3, 0x5a, 0x43, - 0x3f, 0x3a, 0x38, 0xa8, 0xe8, 0x46, 0xbd, 0xa2, 0x9f, 0x56, 0xcb, 0x95, 0xbc, 0x82, 0x36, 0x61, - 0xad, 0x54, 0x2e, 0x57, 0xea, 0xf5, 0xea, 0x4e, 0xf5, 0xa0, 0xda, 0x78, 0x68, 0x94, 0x8f, 0x6a, - 0xf5, 0x86, 0x5e, 0xaa, 0xd6, 0x1a, 0xf5, 0x7c, 0x6a, 0x67, 0x9a, 0xaa, 0xa1, 0x2d, 0xc0, 0xb5, - 0x63, 0xd7, 0x39, 0xc3, 0xa1, 0x71, 0x4b, 0x30, 0xcf, 0x9e, 0x99, 0x31, 0xdf, 0x80, 0xac, 0x8b, - 0xcd, 0xd6, 0x15, 0xdb, 0xb7, 0x5a, 0xa4, 0x0e, 0x5b, 0x0c, 0x1d, 0xb6, 0xb8, 0xe3, 0x38, 0x9d, - 0x53, 0x72, 0x78, 0x3a, 0x25, 0xd4, 0xbe, 0xc8, 0xc2, 0x62, 0xd9, 0xc5, 0xa6, 0x8f, 0x4f, 0x9d, - 0x4e, 0xef, 0x22, 0x14, 0x2d, 0x75, 0xcc, 0xfb, 0xb0, 0x40, 0x8c, 0xdf, 0xb4, 0xfc, 0x2b, 0xc3, - 0x35, 0xed, 0x36, 0x75, 0x87, 0xb9, 0xed, 0xe5, 0xd0, 0x2e, 0x65, 0x86, 0xd5, 0x09, 0x52, 0x9f, - 0x6f, 0xc6, 0x1f, 0x51, 0x15, 0x16, 0x2f, 0x83, 0x25, 0x0c, 0xee, 0xbc, 0xd3, 0xfc, 0x79, 0x53, - 0x2d, 0x62, 0xe7, 0x8d, 0x2e, 0x79, 0x88, 0x85, 0x3d, 0xf4, 0x00, 0xa0, 0x6b, 0xba, 0xe6, 0x05, - 0xf6, 0xb1, 0xeb, 0x15, 0x32, 0xbc, 0xf3, 0x4b, 0x76, 0x53, 0x3c, 0x8e, 0xa8, 0xa9, 0xf3, 0xc7, - 0xd8, 0x91, 0x0f, 0x6b, 0x4d, 0xc7, 0xf6, 0x5d, 0xa7, 0xd3, 0xc1, 0xae, 0xd1, 0x0c, 0xb8, 0x0d, - 0x0f, 0x37, 0x5d, 0xec, 0x7b, 0x85, 0x6c, 0x20, 0xfb, 0xed, 0x61, 0xb2, 0xcb, 0x11, 0x33, 0xc5, - 0xd6, 0x29, 0x2b, 0x5d, 0x68, 0xb5, 0x29, 0xc7, 0xa2, 0x23, 0x58, 0x0e, 0xad, 0xe1, 0xd8, 0x3e, - 0xb6, 0x7d, 0xc3, 0x73, 0x7a, 0x6e, 0x13, 0x17, 0xa6, 
0x03, 0x93, 0xae, 0x0b, 0xf6, 0xa0, 0x34, - 0xf5, 0x80, 0x44, 0x67, 0x76, 0xe4, 0x80, 0xe8, 0x11, 0xa8, 0x66, 0xb3, 0x89, 0x3d, 0xcf, 0xa2, - 0x86, 0x33, 0x5c, 0xfc, 0x69, 0xcf, 0x72, 0xf1, 0x05, 0xb6, 0x7d, 0xaf, 0x30, 0xc3, 0x4b, 0x6d, - 0x38, 0x5d, 0xa7, 0xe3, 0xb4, 0xaf, 0xf4, 0x3e, 0x8d, 0xbe, 0xc6, 0xb1, 0xc7, 0x30, 0x9e, 0xfa, - 0x1e, 0xbc, 0x20, 0x58, 0x70, 0x92, 0x1c, 0xa1, 0x7e, 0x08, 0x1b, 0xc3, 0x8c, 0x34, 0x51, 0xbe, - 0xf9, 0xa5, 0x02, 0x8b, 0x12, 0x9b, 0xa0, 0x7d, 0x98, 0xf5, 0x6c, 0xb3, 0xeb, 0x3d, 0x76, 0x7c, - 0xe6, 0xfc, 0xaf, 0x0e, 0x31, 0x61, 0xb1, 0xce, 0x68, 0xe9, 0xe3, 0xfe, 0x94, 0x1e, 0x71, 0xab, - 0x5b, 0xb0, 0xc0, 0x63, 0xd1, 0x02, 0xa4, 0xac, 0x16, 0x53, 0x2f, 0x65, 0xb5, 0xa2, 0x70, 0x7c, - 0x1f, 0x96, 0x78, 0x87, 0x60, 0x51, 0xf8, 0x12, 0x4c, 0xd3, 0x13, 0x62, 0x9a, 0x2c, 0xf0, 0x9a, - 0xe8, 0x0c, 0xab, 0xfd, 0x2e, 0x03, 0x79, 0xd1, 0xdf, 0xd1, 0x7d, 0xc8, 0x9e, 0x75, 0x9c, 0xe6, - 0x13, 0xc6, 0xfb, 0x62, 0x52, 0x60, 0x14, 0x77, 0x08, 0x15, 0x85, 0xee, 0x4f, 0xe9, 0x94, 0x89, - 0x70, 0x5f, 0x38, 0x3d, 0xdb, 0x67, 0x91, 0x99, 0xcc, 0x7d, 0x48, 0xa8, 0xfa, 0xdc, 0x01, 0x13, - 0xda, 0x85, 0x39, 0xea, 0x04, 0xc6, 0x85, 0xd3, 0xc2, 0x85, 0x74, 0x20, 0xe3, 0x76, 0xa2, 0x8c, - 0x52, 0x40, 0x7b, 0xe8, 0xb4, 0xb0, 0x0e, 0x66, 0xf4, 0x5f, 0x9d, 0x87, 0xb9, 0x98, 0x6e, 0xea, - 0x1e, 0xcc, 0xc5, 0x16, 0x43, 0xab, 0x30, 0x73, 0xee, 0x19, 0x51, 0x56, 0xcd, 0xe9, 0xd3, 0xe7, - 0x5e, 0x90, 0x28, 0x6f, 0xc2, 0x5c, 0xa0, 0x85, 0x71, 0xde, 0x31, 0xdb, 0xf4, 0x1e, 0xc8, 0xe9, - 0x10, 0x80, 0x3e, 0x20, 0x10, 0xf5, 0x5f, 0x0a, 0x40, 0x7f, 0x49, 0x74, 0x1f, 0x32, 0x81, 0x96, - 0x34, 0x37, 0xdf, 0x1d, 0x43, 0xcb, 0x62, 0xa0, 0x6a, 0xc0, 0xa5, 0x7d, 0xa6, 0x40, 0x26, 0x10, - 0x23, 0xe6, 0xe7, 0x7a, 0xb5, 0xb6, 0x77, 0x50, 0x31, 0x6a, 0x47, 0xbb, 0x15, 0xe3, 0x23, 0xbd, - 0xda, 0xa8, 0xe8, 0x79, 0x05, 0xad, 0xc3, 0x6a, 0x1c, 0xae, 0x57, 0x4a, 0xbb, 0x15, 0xdd, 0x38, - 0xaa, 0x1d, 0x3c, 0xcc, 0xa7, 0x90, 0x0a, 0x2b, 0x87, 0x27, 0x07, 0x8d, 0xea, 0x20, 0x2e, 0x8d, - 0x36, 0xa0, 0x10, 0xc3, 0x31, 0x19, 0x4c, 0x6c, 0x86, 0x88, 0x8d, 0x61, 0xe9, 0x5f, 0x86, 0xcc, - 0xee, 0xcc, 0x47, 0x87, 0x11, 0x38, 0xdb, 0x47, 0x30, 0xcf, 0xa5, 0x57, 0x52, 0x26, 0xb0, 0x10, - 0x6f, 0x19, 0x67, 0x57, 0x3e, 0xf6, 0x02, 0x4b, 0xa4, 0xf5, 0xf9, 0x10, 0xba, 0x43, 0x80, 0xc4, - 0xac, 0x1d, 0xeb, 0xc2, 0xf2, 0x19, 0x4d, 0x2a, 0xa0, 0x81, 0x00, 0x14, 0x10, 0x68, 0x7f, 0x49, - 0xc1, 0x34, 0x3b, 0x9b, 0x3b, 0xb1, 0x04, 0xcf, 0x89, 0x0c, 0xa1, 0x54, 0x24, 0x8d, 0x87, 0x54, - 0x18, 0x0f, 0xe8, 0x7d, 0x00, 0xd3, 0xf7, 0x5d, 0xeb, 0xac, 0xe7, 0x47, 0x09, 0xfd, 0x06, 0x7f, - 0x1e, 0xc5, 0x52, 0x44, 0xc0, 0x32, 0x70, 0x9f, 0x03, 0xed, 0xc0, 0x82, 0x90, 0x04, 0x33, 0xa3, - 0x93, 0xe0, 0x7c, 0x93, 0x8b, 0xff, 0x12, 0x2c, 0x86, 0xf9, 0xab, 0x83, 0x0d, 0x9f, 0xe5, 0x37, - 0x96, 0xbf, 0xf3, 0x03, 0x79, 0x0f, 0xf5, 0x89, 0x43, 0x18, 0xc9, 0x72, 0x82, 0x96, 0x13, 0x65, - 0xa6, 0x1e, 0x2c, 0x4a, 0xd2, 0x2a, 0x2a, 0x42, 0x2e, 0x38, 0x10, 0xcf, 0xf2, 0x89, 0xaf, 0xca, - 0xd5, 0xe9, 0x93, 0x10, 0xfa, 0xae, 0x8b, 0xcf, 0xb1, 0xeb, 0xe2, 0x16, 0x2b, 0x86, 0x24, 0xf4, - 0x11, 0x89, 0xf6, 0x73, 0x05, 0x66, 0x43, 0x38, 0x7a, 0x07, 0x66, 0x3d, 0xdc, 0xa6, 0x29, 0x5f, - 0xe1, 0xcf, 0x21, 0xa4, 0x29, 0xd6, 0x19, 0x01, 0x2b, 0x03, 0x43, 0x7a, 0x52, 0x06, 0x72, 0xa8, - 0x89, 0x36, 0xff, 0x6f, 0x05, 0x16, 0x77, 0x71, 0x07, 0x8b, 0x65, 0xc4, 0x3a, 0xe4, 0xd8, 0x35, - 0x17, 0x65, 0xd0, 0x59, 0x0a, 0xa8, 0xb6, 0x84, 0x9b, 0xb7, 0x15, 0xb0, 0x47, 0x37, 0x6f, 0x8a, - 0xbf, 0x79, 0x25, 0xc2, 0x63, 0x37, 0x2f, 0xc5, 0x26, 0xdd, 0xbc, 0x1c, 0x96, 
0xbf, 0x8d, 0x06, - 0x19, 0x27, 0xda, 0xf6, 0x0a, 0x2c, 0xf1, 0x8a, 0xd1, 0x1b, 0x40, 0xfb, 0x53, 0x06, 0x6e, 0xf4, - 0x17, 0x39, 0xee, 0x9d, 0x75, 0x2c, 0xef, 0xf1, 0x04, 0x96, 0x59, 0x85, 0x19, 0xdb, 0x69, 0x05, - 0x28, 0xba, 0xe6, 0x34, 0x79, 0xac, 0xb6, 0x50, 0x05, 0xae, 0x8b, 0x45, 0xd4, 0x15, 0xcb, 0xd3, - 0xc9, 0x25, 0x54, 0xfe, 0x52, 0xbc, 0x64, 0x54, 0x98, 0x25, 0xe5, 0x9f, 0x63, 0x77, 0xae, 0x82, - 0x58, 0x9b, 0xd5, 0xa3, 0x67, 0xf4, 0x33, 0x05, 0xd4, 0xd8, 0xb1, 0x74, 0xa9, 0xf2, 0x42, 0x45, - 0xb4, 0x1b, 0x55, 0x44, 0x43, 0x77, 0x39, 0x88, 0xe6, 0xce, 0xa8, 0xd0, 0x4c, 0x40, 0x23, 0x2b, - 0xda, 0x67, 0x2c, 0xb3, 0x4c, 0x07, 0x4b, 0xdf, 0x1f, 0x73, 0x69, 0xfa, 0x24, 0xe6, 0x1d, 0x66, - 0x8b, 0x3e, 0x58, 0x7d, 0x00, 0x9b, 0x43, 0xb5, 0x9c, 0xa8, 0xd4, 0x29, 0xc3, 0xb2, 0x74, 0xdd, - 0x89, 0xbc, 0xea, 0xcf, 0x0a, 0xdc, 0x4c, 0xdc, 0x1c, 0xab, 0x31, 0x7e, 0x00, 0xd7, 0xc2, 0x93, - 0xb1, 0xec, 0x73, 0x87, 0x45, 0xfb, 0xdb, 0x23, 0x6d, 0xc3, 0x7a, 0x41, 0x06, 0x25, 0xfd, 0x21, - 0xb5, 0xcb, 0x5c, 0xb7, 0x0f, 0x51, 0xdf, 0x87, 0xbc, 0x48, 0x30, 0xd1, 0x06, 0xfe, 0x98, 0x82, - 0xad, 0xbe, 0x06, 0x27, 0x76, 0xf7, 0xf9, 0x05, 0xc0, 0xaf, 0x14, 0xd8, 0x88, 0x79, 0x67, 0xcf, - 0x16, 0xfd, 0x93, 0x5e, 0x3f, 0xfb, 0x83, 0x86, 0x90, 0xab, 0x21, 0x23, 0xe0, 0x7c, 0x34, 0x16, - 0x0b, 0x22, 0x81, 0x7a, 0x18, 0x3f, 0x27, 0x29, 0xfb, 0x44, 0x66, 0xbb, 0x0d, 0xb7, 0x86, 0xa8, - 0xcb, 0x52, 0xcb, 0x4f, 0xd3, 0x70, 0xeb, 0xd4, 0xec, 0x58, 0xad, 0xa8, 0xee, 0x94, 0xb4, 0xdd, - 0xc3, 0x8d, 0x9b, 0xd0, 0x89, 0xa5, 0xbe, 0x42, 0x27, 0xd6, 0x91, 0xc5, 0x29, 0x3d, 0x82, 0xef, - 0x46, 0x82, 0x46, 0x69, 0x3b, 0x6e, 0xa8, 0x26, 0x5d, 0xf2, 0x99, 0x09, 0x2e, 0xf9, 0xe7, 0x12, - 0xa0, 0x1f, 0x83, 0x36, 0x6c, 0x53, 0x2c, 0x44, 0x37, 0x20, 0xe7, 0xf5, 0xba, 0x5d, 0xc7, 0xf5, - 0x31, 0x3d, 0x83, 0x59, 0xbd, 0x0f, 0x40, 0x05, 0x98, 0xb9, 0xc0, 0x9e, 0x67, 0xb6, 0x43, 0xf9, - 0xe1, 0xa3, 0xf6, 0x31, 0xa0, 0x03, 0xcb, 0x63, 0xf5, 0x72, 0x74, 0xa2, 0xa4, 0x3c, 0x36, 0x9f, - 0x1a, 0xd8, 0xf6, 0x5d, 0x8b, 0x15, 0x66, 0x59, 0x1d, 0x2e, 0xcc, 0xa7, 0x15, 0x0a, 0x21, 0xc5, - 0x9b, 0xe7, 0x9b, 0xae, 0x6f, 0xd9, 0x6d, 0xc3, 0x77, 0x9e, 0xe0, 0x68, 0x6c, 0x14, 0x42, 0x1b, - 0x04, 0xa8, 0x7d, 0xae, 0xc0, 0x22, 0x27, 0x9e, 0x69, 0xfb, 0x2e, 0xcc, 0xf4, 0x65, 0x13, 0x7b, - 0xde, 0x0a, 0xed, 0x29, 0xa1, 0x2e, 0xd2, 0x13, 0x0a, 0x39, 0xd0, 0x26, 0x80, 0x8d, 0x9f, 0xfa, - 0xdc, 0xba, 0x39, 0x02, 0x09, 0xd6, 0x54, 0xef, 0x41, 0x96, 0x1a, 0x79, 0xdc, 0xce, 0xe8, 0x8b, - 0x14, 0xa0, 0x3d, 0xec, 0x47, 0x05, 0x2f, 0xb3, 0x41, 0x82, 0xe3, 0x2a, 0x5f, 0xc1, 0x71, 0x3f, - 0xe4, 0x46, 0x08, 0xd4, 0xf5, 0x5f, 0x8d, 0xcd, 0xcf, 0x84, 0xa5, 0x87, 0x4e, 0x10, 0x12, 0xdc, - 0x92, 0x5e, 0xcb, 0x63, 0xd7, 0x9e, 0xcf, 0xd0, 0x61, 0x6b, 0xbb, 0xb0, 0xc8, 0xe9, 0xcc, 0xce, - 0xf4, 0x75, 0x40, 0xe6, 0xa5, 0x69, 0x75, 0x4c, 0xa2, 0x57, 0x58, 0xc3, 0xb3, 0x9a, 0xfe, 0x7a, - 0x84, 0x09, 0xd9, 0x34, 0x2d, 0x9e, 0xb5, 0x99, 0x3c, 0x71, 0x9e, 0xd7, 0x89, 0xe7, 0xa8, 0x01, - 0x1a, 0xb6, 0xee, 0x9e, 0x74, 0xa6, 0x77, 0x7b, 0x30, 0x27, 0xb3, 0xb9, 0x59, 0xe2, 0x78, 0xef, - 0x6f, 0x29, 0x58, 0x1f, 0x42, 0x8d, 0xde, 0x85, 0xb4, 0xdb, 0x6d, 0x32, 0x67, 0x7a, 0x79, 0x0c, - 0xf9, 0x45, 0xfd, 0xb8, 0xbc, 0x3f, 0xa5, 0x13, 0x2e, 0xf5, 0x4b, 0x05, 0xd2, 0xfa, 0x71, 0x19, - 0x7d, 0x8f, 0x1b, 0xf2, 0xbd, 0x36, 0xa6, 0x94, 0xf8, 0xac, 0x8f, 0x34, 0x93, 0x83, 0xc3, 0xbe, - 0x02, 0x2c, 0x95, 0xf5, 0x4a, 0xa9, 0x51, 0x31, 0x76, 0x2b, 0x07, 0x95, 0x46, 0xc5, 0x38, 0x3d, - 0x3a, 0x38, 0x39, 0xac, 0xe4, 0x15, 0xd2, 0x15, 0x1e, 0x9f, 0xec, 0x1c, 0x54, 0xeb, 0xfb, 0xc6, - 0x49, 
0x2d, 0xfc, 0xc7, 0xb0, 0x29, 0x94, 0x87, 0x6b, 0x07, 0xd5, 0x7a, 0x83, 0x01, 0xea, 0xf9, - 0x34, 0x81, 0xec, 0x55, 0x1a, 0x46, 0xb9, 0x74, 0x5c, 0x2a, 0x57, 0x1b, 0x0f, 0xf3, 0x19, 0xd2, - 0x73, 0xf2, 0xb2, 0xeb, 0xb5, 0xd2, 0x71, 0x7d, 0xff, 0xa8, 0x91, 0xcf, 0x22, 0x04, 0x0b, 0x01, - 0x7f, 0x08, 0xaa, 0xe7, 0xa7, 0xa3, 0x91, 0xc5, 0x67, 0x69, 0x58, 0x66, 0x13, 0x18, 0x36, 0xe3, - 0x08, 0x63, 0xeb, 0x2e, 0xe4, 0x69, 0xf3, 0x65, 0x88, 0x17, 0xc7, 0x02, 0x85, 0x9f, 0x86, 0xd7, - 0x47, 0x38, 0x1a, 0x4c, 0xc5, 0x46, 0x83, 0x5d, 0x58, 0x0d, 0x27, 0x67, 0x4c, 0xae, 0x70, 0x21, - 0x0b, 0x23, 0x34, 0x61, 0x75, 0x01, 0xca, 0x5d, 0xc0, 0xcb, 0x4d, 0x19, 0x0e, 0x1d, 0x4a, 0x66, - 0x80, 0xaf, 0x0f, 0x5f, 0x64, 0x48, 0x0c, 0xab, 0xfb, 0xa0, 0x26, 0xeb, 0x30, 0x51, 0x09, 0xf8, - 0x8c, 0xa1, 0xfc, 0x01, 0xac, 0x88, 0xda, 0xb3, 0xa8, 0x7a, 0x6d, 0x60, 0xc4, 0x15, 0xe5, 0x96, - 0x88, 0x36, 0xa2, 0xd0, 0xfe, 0xa0, 0xc0, 0x6c, 0x08, 0x26, 0xf9, 0xd9, 0xb3, 0x7e, 0x82, 0xb9, - 0xa6, 0x3e, 0x47, 0x20, 0xf2, 0x86, 0x5e, 0xe6, 0x0b, 0x69, 0xa9, 0x2f, 0x6c, 0x02, 0xd0, 0xe3, - 0x69, 0x19, 0xa6, 0x1f, 0xb4, 0x12, 0x69, 0x3d, 0xc7, 0x20, 0x25, 0xd2, 0xfc, 0x4e, 0x7b, 0xbe, - 0xe9, 0xf7, 0x48, 0xdb, 0x40, 0x14, 0x5e, 0x11, 0x15, 0xae, 0x07, 0x58, 0x9d, 0x51, 0x91, 0x40, - 0x5a, 0xe0, 0x51, 0xe8, 0x1e, 0x17, 0x9d, 0xeb, 0x72, 0x01, 0xb1, 0x60, 0x24, 0x17, 0x6b, 0x0b, - 0xfb, 0xa6, 0xd5, 0xf1, 0xc2, 0x8b, 0x95, 0x3d, 0x6a, 0x3b, 0xb2, 0x28, 0xcd, 0x41, 0x56, 0xaf, - 0x94, 0x76, 0x1f, 0xe6, 0x15, 0x34, 0x0f, 0xb9, 0x93, 0xe3, 0x83, 0xa3, 0xd2, 0x6e, 0xb5, 0xb6, - 0x97, 0x4f, 0xa1, 0x45, 0x78, 0xa1, 0xa2, 0xeb, 0x47, 0xba, 0xd1, 0x07, 0xa6, 0x49, 0xa3, 0xbb, - 0xcc, 0x9a, 0x46, 0x21, 0x80, 0x6e, 0xc2, 0x5c, 0xe4, 0xfb, 0x51, 0xec, 0x40, 0x08, 0xaa, 0xb6, - 0x48, 0x8c, 0x84, 0x3d, 0xae, 0x18, 0x23, 0xd2, 0x66, 0x57, 0x74, 0x5f, 0x1e, 0xca, 0xc7, 0x48, - 0x4b, 0x86, 0x23, 0x4e, 0x9d, 0xcc, 0x34, 0x91, 0x57, 0x16, 0x60, 0x45, 0x54, 0x8a, 0xd5, 0xa3, - 0xbf, 0x55, 0x60, 0x89, 0x54, 0x08, 0x21, 0xe2, 0x79, 0x17, 0x2c, 0x13, 0x38, 0xa3, 0x70, 0x02, - 0x19, 0xf1, 0x04, 0xb4, 0xdf, 0x2b, 0xb0, 0x2c, 0xe8, 0xca, 0x62, 0xeb, 0x3d, 0xb1, 0xfa, 0xb9, - 0x1d, 0xaf, 0x7e, 0x06, 0xe8, 0x27, 0xac, 0x7f, 0xde, 0x0c, 0xeb, 0x9f, 0xc9, 0x42, 0xf8, 0x37, - 0x59, 0x58, 0xa9, 0x39, 0x2d, 0x5c, 0xf7, 0xcd, 0xf6, 0x24, 0x73, 0x15, 0x5d, 0xe8, 0x0d, 0xa9, - 0x77, 0xdd, 0x0b, 0x57, 0x92, 0x8b, 0x1c, 0xde, 0x12, 0xa2, 0x22, 0x2c, 0x7a, 0xbe, 0xd9, 0x0e, - 0xce, 0xca, 0x74, 0xdb, 0xd8, 0x37, 0xba, 0xa6, 0xff, 0x98, 0x1d, 0xc4, 0x75, 0x86, 0x6a, 0x04, - 0x98, 0x63, 0xd3, 0x7f, 0x2c, 0x1f, 0x54, 0x64, 0x26, 0x1e, 0x54, 0x9c, 0x01, 0x0a, 0xfa, 0x40, - 0xb2, 0x80, 0xf8, 0x56, 0xe6, 0xdb, 0x23, 0x36, 0x14, 0x81, 0xb9, 0x50, 0xc9, 0xdb, 0x02, 0x18, - 0x99, 0xc9, 0xb3, 0x86, 0x51, 0x4b, 0x8c, 0x3b, 0x63, 0x78, 0xc6, 0x86, 0x9a, 0x74, 0x2d, 0xd2, - 0xdd, 0x7c, 0xfd, 0xb3, 0x89, 0x35, 0x58, 0x1d, 0xb0, 0x05, 0xcb, 0x04, 0x6d, 0x28, 0x10, 0xd4, - 0x89, 0xed, 0x4d, 0xe8, 0xaf, 0x09, 0xbe, 0x95, 0x4a, 0xf0, 0x2d, 0x6d, 0x1d, 0xd6, 0x24, 0x0b, - 0x31, 0x2d, 0xfe, 0x91, 0xa5, 0x6a, 0x4c, 0x3e, 0x74, 0x6b, 0x48, 0xc3, 0xe6, 0x9b, 0x71, 0x17, - 0x90, 0x0e, 0x9a, 0x9e, 0x6f, 0xe0, 0xdc, 0x84, 0xb9, 0x38, 0x1d, 0x4b, 0x62, 0xfe, 0x88, 0xc8, - 0xca, 0x3e, 0xd3, 0x08, 0x70, 0x5a, 0x18, 0x01, 0xfe, 0x08, 0x96, 0x82, 0xa8, 0x13, 0x67, 0x2b, - 0x33, 0xfc, 0x35, 0x95, 0x68, 0x91, 0x18, 0x82, 0x8b, 0xbd, 0x20, 0x96, 0x85, 0x49, 0x5f, 0x53, - 0x16, 0x7d, 0xb3, 0xc1, 0x42, 0x6f, 0x8d, 0x5c, 0xe8, 0xeb, 0x8a, 0xbf, 0x0a, 0xf5, 0xfa, 0xff, - 0x8b, 0xe9, 0x20, 0xf3, 0x7e, 
0xe9, 0x5c, 0x4f, 0x7b, 0x04, 0x2a, 0x0d, 0x8d, 0xc9, 0x47, 0x6e, - 0x82, 0xe3, 0xa5, 0x44, 0xc7, 0xd3, 0x36, 0x61, 0x5d, 0x2a, 0x9b, 0x2d, 0x8d, 0x20, 0x4f, 0xd0, - 0x7b, 0xd8, 0xaf, 0xb6, 0xc2, 0x6e, 0xf1, 0x35, 0xb8, 0x1e, 0x83, 0xb1, 0xbb, 0x36, 0x36, 0xdb, - 0x53, 0xe2, 0xb3, 0x3d, 0x6d, 0x83, 0x2a, 0x9f, 0xd0, 0x79, 0x7e, 0x42, 0x97, 0x4f, 0xea, 0x39, - 0x4b, 0x42, 0xcf, 0x49, 0xaf, 0xf1, 0x4d, 0x2e, 0x81, 0x8f, 0xe8, 0x36, 0xff, 0xaa, 0xb0, 0x34, - 0x3b, 0xd0, 0x67, 0xbe, 0x19, 0xef, 0x33, 0x6f, 0x0d, 0x95, 0x19, 0xef, 0x30, 0xbb, 0xb4, 0xc1, - 0x7c, 0x87, 0x2b, 0x61, 0x5f, 0x1a, 0xc9, 0x1e, 0x6f, 0x2d, 0x5f, 0x4f, 0xe8, 0x2c, 0xeb, 0x8d, - 0xd2, 0x5e, 0xc5, 0x38, 0xa9, 0xd1, 0xdf, 0xb0, 0xb3, 0x8c, 0xfa, 0xbc, 0x25, 0x40, 0xa1, 0xe1, - 0x63, 0xdf, 0x21, 0x7d, 0xae, 0xc0, 0x22, 0x07, 0x1e, 0x71, 0x22, 0xe8, 0x1e, 0x2c, 0x91, 0x1a, - 0x8e, 0xfa, 0x88, 0x67, 0x74, 0xb1, 0x6b, 0x10, 0x0c, 0x7b, 0x8b, 0x78, 0xfd, 0xc2, 0x7c, 0xca, - 0x06, 0x43, 0xc7, 0xd8, 0x25, 0x82, 0x9f, 0xc3, 0x28, 0x64, 0xfb, 0x3f, 0x0a, 0xcc, 0x56, 0x5b, - 0xd8, 0xf6, 0x89, 0xe1, 0x6b, 0x30, 0xcf, 0x7d, 0xcc, 0x84, 0x36, 0x12, 0xbe, 0x71, 0x0a, 0x36, - 0xa8, 0x6e, 0x0e, 0xfd, 0x02, 0x4a, 0x9b, 0x42, 0xe7, 0xb1, 0x0f, 0xb1, 0xb8, 0x79, 0xd0, 0x8b, - 0x03, 0x9c, 0x12, 0x1f, 0x54, 0xef, 0x8c, 0xa0, 0x8a, 0xd6, 0x79, 0x0b, 0xb2, 0xc1, 0x97, 0x39, - 0x68, 0x29, 0xfa, 0x66, 0x28, 0xf6, 0xe1, 0x8e, 0xba, 0x2c, 0x40, 0x43, 0xbe, 0xed, 0xff, 0xce, - 0x00, 0xf4, 0x07, 0x0f, 0xe8, 0x01, 0x5c, 0x8b, 0x7f, 0x61, 0x80, 0xd6, 0x87, 0x7c, 0x88, 0xa2, - 0x6e, 0xc8, 0x91, 0x91, 0x4e, 0x0f, 0xe0, 0x5a, 0xfc, 0x65, 0x55, 0x5f, 0x98, 0xe4, 0xdd, 0x5a, - 0x5f, 0x98, 0xf4, 0xfd, 0xd6, 0x14, 0xea, 0xc0, 0x6a, 0xc2, 0x3b, 0x06, 0xf4, 0xd2, 0x78, 0x2f, - 0x68, 0xd4, 0x97, 0xc7, 0x7c, 0x59, 0xa1, 0x4d, 0x21, 0x17, 0xd6, 0x12, 0x27, 0xe3, 0xe8, 0xee, - 0xb8, 0xb3, 0x7e, 0xf5, 0x95, 0x31, 0x28, 0xa3, 0x35, 0x7b, 0xa0, 0x26, 0x0f, 0x79, 0xd1, 0x2b, - 0x63, 0x4f, 0xb7, 0xd5, 0x57, 0xc7, 0x21, 0x8d, 0x96, 0xdd, 0x87, 0xb9, 0xd8, 0xc0, 0x15, 0xa9, - 0xd2, 0x29, 0x2c, 0x15, 0xbc, 0x3e, 0x64, 0x42, 0x4b, 0x25, 0xc5, 0x86, 0x82, 0x7d, 0x49, 0x83, - 0xd3, 0xcd, 0xbe, 0x24, 0xc9, 0x14, 0x51, 0x34, 0xbf, 0x90, 0x80, 0x65, 0xe6, 0x97, 0x67, 0x70, - 0x99, 0xf9, 0x13, 0xb2, 0xb9, 0x36, 0x85, 0xbe, 0x0f, 0x0b, 0xfc, 0x1c, 0x04, 0x6d, 0x0e, 0x9d, - 0xee, 0xa8, 0x37, 0x92, 0xd0, 0x71, 0x91, 0x7c, 0x13, 0xdb, 0x17, 0x29, 0xed, 0xb8, 0xfb, 0x22, - 0x13, 0x7a, 0xdf, 0x29, 0x92, 0x9f, 0xb8, 0x06, 0xb1, 0x9f, 0x9f, 0x64, 0x3d, 0x71, 0x3f, 0x3f, - 0x49, 0xbb, 0x4a, 0x6d, 0x6a, 0xfb, 0xcb, 0x0c, 0x64, 0x82, 0x44, 0xda, 0x80, 0x17, 0x84, 0x3a, - 0x1b, 0xdd, 0x18, 0xde, 0x8c, 0xa8, 0x37, 0x13, 0xf1, 0x91, 0xba, 0x8f, 0xe8, 0x7d, 0xcc, 0x55, - 0xce, 0x68, 0x2b, 0xce, 0x27, 0xab, 0xde, 0xd5, 0x5b, 0x43, 0x28, 0x44, 0xd9, 0x7c, 0x2e, 0xd8, - 0x1a, 0x55, 0xc2, 0xf1, 0xb2, 0x93, 0xe2, 0xff, 0x13, 0x7a, 0x6f, 0x89, 0x91, 0xaf, 0xf1, 0x7a, - 0x49, 0x63, 0xfe, 0xf6, 0x50, 0x9a, 0x68, 0x85, 0x0a, 0xe4, 0xa2, 0x4a, 0x05, 0x15, 0xe2, 0x3c, - 0xf1, 0x82, 0x46, 0x5d, 0x93, 0x60, 0x98, 0x8c, 0xf4, 0x2f, 0x52, 0x4a, 0xa8, 0xa8, 0x18, 0x23, - 0x9a, 0xc0, 0x26, 0x8b, 0x8e, 0xdb, 0x43, 0x69, 0xe2, 0x51, 0x1d, 0xbb, 0xc2, 0xfb, 0x51, 0x3d, - 0x78, 0xdd, 0xf7, 0xa3, 0x5a, 0x72, 0xe7, 0x6b, 0x53, 0x3b, 0xd9, 0x47, 0xe9, 0xa6, 0x67, 0x9d, - 0x4d, 0x07, 0x1f, 0x87, 0x7e, 0xeb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x3b, 0x5a, 0x51, - 0xf0, 0x2c, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD index ccf0881ea3f..198fd220827 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD @@ -11,7 +11,7 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -48,7 +48,7 @@ go_test( "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 2ef1da6d9ed..b6920ba078d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -28,7 +28,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -72,7 +72,7 @@ type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error) // Interface implements an interface for managing labels of a node type Interface interface { - CreateCSINode() (*storagev1beta1.CSINode, error) + CreateCSINode() (*storagev1.CSINode, error) // Updates or Creates the CSINode object with annotations for CSI Migration InitializeCSINodeWithAnnotation() error @@ -379,7 +379,7 @@ func (nim *nodeInfoManager) tryUpdateCSINode( maxAttachLimit int64, topology map[string]string) error { - nodeInfo, err := csiKubeClient.StorageV1beta1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { nodeInfo, err = nim.CreateCSINode() } @@ -412,7 +412,7 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error { } func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient clientset.Interface) error { - nodeInfo, err := csiKubeClient.StorageV1beta1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { // CreateCSINode will set the annotation _, err = nim.CreateCSINode() @@ -424,14 +424,14 @@ func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient cli annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo) if annotationModified { - _, err 
:= csiKubeClient.StorageV1beta1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) return err } return nil } -func (nim *nodeInfoManager) CreateCSINode() (*storagev1beta1.CSINode, error) { +func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { kubeClient := nim.volumeHost.GetKubeClient() if kubeClient == nil { @@ -448,7 +448,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1beta1.CSINode, error) { return nil, err } - nodeInfo := &storagev1beta1.CSINode{ + nodeInfo := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: string(nim.nodeName), OwnerReferences: []metav1.OwnerReference{ @@ -460,17 +460,17 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1beta1.CSINode, error) { }, }, }, - Spec: storagev1beta1.CSINodeSpec{ - Drivers: []storagev1beta1.CSINodeDriver{}, + Spec: storagev1.CSINodeSpec{ + Drivers: []storagev1.CSINodeDriver{}, }, } setMigrationAnnotation(nim.migratedPlugins, nodeInfo) - return csiKubeClient.StorageV1beta1().CSINodes().Create(nodeInfo) + return csiKubeClient.StorageV1().CSINodes().Create(nodeInfo) } -func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1beta1.CSINode) (modified bool) { +func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1.CSINode) (modified bool) { if migratedPlugins == nil { return false } @@ -512,7 +512,7 @@ func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo * } func (nim *nodeInfoManager) installDriverToCSINode( - nodeInfo *storagev1beta1.CSINode, + nodeInfo *storagev1.CSINode, driverName string, driverNodeID string, maxAttachLimit int64, @@ -530,7 +530,7 @@ func (nim *nodeInfoManager) installDriverToCSINode( specModified := true // Clone driver list, omitting the driver that matches the given driverName - newDriverSpecs := []storagev1beta1.CSINodeDriver{} + newDriverSpecs := []storagev1.CSINodeDriver{} for _, driverInfoSpec := range nodeInfo.Spec.Drivers { if driverInfoSpec.Name == driverName { if driverInfoSpec.NodeID == driverNodeID && @@ -550,29 +550,27 @@ func (nim *nodeInfoManager) installDriverToCSINode( } // Append new driver - driverSpec := storagev1beta1.CSINodeDriver{ + driverSpec := storagev1.CSINodeDriver{ Name: driverName, NodeID: driverNodeID, TopologyKeys: topologyKeys.List(), } - if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { - if maxAttachLimit > 0 { - if maxAttachLimit > math.MaxInt32 { - klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32) - maxAttachLimit = math.MaxInt32 - } - m := int32(maxAttachLimit) - driverSpec.Allocatable = &storagev1beta1.VolumeNodeResources{Count: &m} - } else { - klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName) + if maxAttachLimit > 0 { + if maxAttachLimit > math.MaxInt32 { + klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32) + maxAttachLimit = math.MaxInt32 } + m := int32(maxAttachLimit) + driverSpec.Allocatable = &storagev1.VolumeNodeResources{Count: &m} + } else { + klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName) } newDriverSpecs = append(newDriverSpecs, driverSpec) nodeInfo.Spec.Drivers = newDriverSpecs - _, err := csiKubeClient.StorageV1beta1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) return err } @@ -602,7 +600,7 @@ func (nim 
*nodeInfoManager) tryUninstallDriverFromCSINode( csiKubeClient clientset.Interface, csiDriverName string) error { - nodeInfoClient := csiKubeClient.StorageV1beta1().CSINodes() + nodeInfoClient := csiKubeClient.StorageV1().CSINodes() nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/BUILD new file mode 100644 index 00000000000..4b70f22351a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["plugin_manager.go"], + importpath = "k8s.io/kubernetes/pkg/volume/csimigration", + visibility = ["//visibility:public"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/volume:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate:go_default_library", + "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["plugin_manager_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/volume:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//staging/src/k8s.io/csi-translation-lib:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/plugin_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/plugin_manager.go new file mode 100644 index 00000000000..cab93506823 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csimigration/plugin_manager.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
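The installDriverToCSINode hunk above drops the AttachVolumeLimit feature-gate check and now always clamps maxAttachLimit into an int32 before storing it as the driver's Allocatable count. Below is a standalone sketch of that clamping rule, using the storage.k8s.io/v1 types the diff switches to; it is an illustration only, and it returns nil where the real code also logs an error via klog.

package main

import (
	"fmt"
	"math"

	storagev1 "k8s.io/api/storage/v1"
)

// allocatableFor mirrors the clamping behaviour: positive limits larger than
// math.MaxInt32 are truncated, non-positive limits yield no Allocatable entry.
func allocatableFor(maxAttachLimit int64) *storagev1.VolumeNodeResources {
	if maxAttachLimit <= 0 {
		return nil // the vendored code additionally logs the invalid value
	}
	if maxAttachLimit > math.MaxInt32 {
		maxAttachLimit = math.MaxInt32
	}
	m := int32(maxAttachLimit)
	return &storagev1.VolumeNodeResources{Count: &m}
}

func main() {
	if a := allocatableFor(39); a != nil {
		fmt.Println(*a.Count) // 39
	}
	if a := allocatableFor(int64(math.MaxInt32) + 1); a != nil {
		fmt.Println(*a.Count) // clamped to math.MaxInt32
	}
	fmt.Println(allocatableFor(0) == nil) // true: non-positive limits are rejected
}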
+*/ + +package csimigration + +import ( + "errors" + "fmt" + + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" + csilibplugins "k8s.io/csi-translation-lib/plugins" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/volume" +) + +// PluginNameMapper contains utility methods to retrieve names of plugins +// that support a spec, map intree <=> migrated CSI plugin names, etc +type PluginNameMapper interface { + GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) + GetCSINameFromInTreeName(pluginName string) (string, error) +} + +// PluginManager keeps track of migrated state of in-tree plugins +type PluginManager struct { + PluginNameMapper +} + +// NewPluginManager returns a new PluginManager instance +func NewPluginManager(m PluginNameMapper) PluginManager { + return PluginManager{ + PluginNameMapper: m, + } +} + +// IsMigrationCompleteForPlugin indicates whether CSI migration has been completed +// for a particular storage plugin +func (pm PluginManager) IsMigrationCompleteForPlugin(pluginName string) bool { + // CSIMigration feature and plugin specific migration feature flags should + // be enabled for plugin specific migration completion feature flags to be + // take effect + if !pm.IsMigrationEnabledForPlugin(pluginName) { + return false + } + + switch pluginName { + case csilibplugins.AWSEBSInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWSComplete) + case csilibplugins.GCEPDInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCEComplete) + case csilibplugins.AzureFileInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFileComplete) + case csilibplugins.AzureDiskInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDiskComplete) + case csilibplugins.CinderInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStackComplete) + default: + return false + } +} + +// IsMigrationEnabledForPlugin indicates whether CSI migration has been enabled +// for a particular storage plugin +func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool { + // CSIMigration feature should be enabled along with the plugin-specific one + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { + return false + } + + switch pluginName { + case csilibplugins.AWSEBSInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) + case csilibplugins.GCEPDInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) + case csilibplugins.AzureFileInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFile) + case csilibplugins.AzureDiskInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) + case csilibplugins.CinderInTreePluginName: + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) + default: + return false + } +} + +// IsMigratable indicates whether CSI migration has been enabled for a volume +// plugin that the spec refers to +func (pm PluginManager) IsMigratable(spec *volume.Spec) (bool, error) { + if spec == nil { + return false, fmt.Errorf("could not find if plugin is migratable because volume spec is nil") + } + + pluginName, _ := pm.GetInTreePluginNameFromSpec(spec.PersistentVolume, spec.Volume) + if pluginName == "" { + return 
false, nil + } + // found an in-tree plugin that supports the spec + return pm.IsMigrationEnabledForPlugin(pluginName), nil +} + +// InTreeToCSITranslator performs translation of Volume sources for PV and Volume objects +// from references to in-tree plugins to migrated CSI plugins +type InTreeToCSITranslator interface { + TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) + TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) +} + +// TranslateInTreeSpecToCSI translates a volume spec (either PV or inline volume) +// supported by an in-tree plugin to CSI +func TranslateInTreeSpecToCSI(spec *volume.Spec, translator InTreeToCSITranslator) (*volume.Spec, error) { + var csiPV *v1.PersistentVolume + var err error + inlineVolume := false + if spec.PersistentVolume != nil { + csiPV, err = translator.TranslateInTreePVToCSI(spec.PersistentVolume) + } else if spec.Volume != nil { + csiPV, err = translator.TranslateInTreeInlineVolumeToCSI(spec.Volume) + inlineVolume = true + } else { + err = errors.New("not a valid volume spec") + } + if err != nil { + return nil, fmt.Errorf("failed to translate in-tree pv to CSI: %v", err) + } + return &volume.Spec{ + PersistentVolume: csiPV, + ReadOnly: spec.ReadOnly, + InlineVolumeSpecForCSIMigration: inlineVolume, + }, nil +} + +// CheckMigrationFeatureFlags checks the configuration of feature flags related +// to CSI Migration is valid +func CheckMigrationFeatureFlags(f featuregate.FeatureGate, pluginMigration, pluginMigrationComplete featuregate.Feature) error { + if f.Enabled(pluginMigration) && !f.Enabled(features.CSIMigration) { + return fmt.Errorf("enabling %q requires CSIMigration to be enabled", pluginMigration) + } + if f.Enabled(pluginMigrationComplete) && !f.Enabled(pluginMigration) { + return fmt.Errorf("enabling %q requires %q to be enabled", pluginMigrationComplete, pluginMigration) + } + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS index 93c0d5e773d..8ef14a0c2a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS @@ -4,6 +4,7 @@ approvers: - pmorie - saad-ali - thockin +emeritus_approvers: - matchstick reviewers: - ivan4th diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index 8d2aabb019a..12746696888 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -80,10 +80,6 @@ func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.DownwardAPI != nil } -func (plugin *downwardAPIPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *downwardAPIPlugin) RequiresRemount() bool { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD index 51b6ee31a25..c4a58fda540 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD @@ -17,7 +17,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/emptydir", deps = [ 
"//pkg/apis/core/v1/helper:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/fsquota:go_default_library", @@ -26,8 +25,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], @@ -40,8 +43,18 @@ go_test( srcs = ["empty_dir_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -50,6 +63,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS index 93c0d5e773d..8ef14a0c2a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS @@ -4,6 +4,7 @@ approvers: - pmorie - saad-ali - thockin +emeritus_approvers: - matchstick reviewers: - ivan4th diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go index c42b68ac145..607705aea9f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go @@ -21,17 +21,18 @@ import ( "os" "path/filepath" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/fsquota" - utilstrings "k8s.io/utils/strings" ) // TODO: in the near future, this will be changed to be more restrictive @@ -90,10 +91,6 @@ func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool { return false } -func (plugin *emptyDirPlugin) 
IsMigratedToCSI() bool { - return false -} - func (plugin *emptyDirPlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go index 3f8b1628274..93b48e27dc6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go @@ -23,9 +23,9 @@ import ( "golang.org/x/sys/unix" "k8s.io/klog" + "k8s.io/utils/mount" - "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/util/mount" + v1 "k8s.io/api/core/v1" ) // Defined by Linux - the type number for tmpfs mounts. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go index caeb6a77182..b048f76081a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go @@ -19,8 +19,9 @@ limitations under the License. package emptydir import ( - "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" ) // realMountDetector pretends to implement mediumer. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD index bec107d7dc7..dd16c9f27b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/fc", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -27,6 +26,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -39,7 +40,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -48,6 +48,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go index 356399330f1..4a431fd9928 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go @@ -23,12 +23,13 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog" 
"k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index 602b7aa0bdb..8ad90f11b8f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -20,7 +20,8 @@ import ( "os" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index ee8a09226e4..dfc2aa9d062 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -23,17 +23,19 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -86,10 +88,6 @@ func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool { return (spec.Volume != nil && spec.Volume.FC != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC != nil) } -func (plugin *fcPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *fcPlugin) RequiresRemount() bool { return false } @@ -114,7 +112,7 @@ func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Volu return plugin.newMounterInternal(spec, pod.UID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) { +func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec utilexec.Interface) (volume.Mounter, error) { // fc volumes used directly in a pod have a ReadOnly flag set by the pod author. 
// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV fc, readOnly, err := getVolumeSource(spec) @@ -174,7 +172,7 @@ func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ v return plugin.newBlockVolumeMapperInternal(spec, uid, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) { +func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec utilexec.Interface) (volume.BlockVolumeMapper, error) { fc, readOnly, err := getVolumeSource(spec) if err != nil { return nil, err @@ -412,20 +410,13 @@ type fcDiskMapper struct { var _ volume.BlockVolumeMapper = &fcDiskMapper{} -func (b *fcDiskMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *fcDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - type fcDiskUnmapper struct { *fcDisk deviceUtil util.DeviceUtil } var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{} +var _ volume.CustomBlockVolumeUnmapper = &fcDiskUnmapper{} func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { err := c.manager.DetachBlockFCDisk(*c, mapPath, devicePath) @@ -440,6 +431,10 @@ func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { return nil } +func (c *fcDiskUnmapper) UnmapPodDevice() error { + return nil +} + // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/{WWID}/{podUid} func (fc *fcDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index de5d52ca5a0..9fec400e8f1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -24,11 +24,12 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD index 6af91d59242..9cb8aceaa93 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD @@ -30,7 +30,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/flexvolume", deps = [ "//pkg/util/filesystem:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -40,6 +39,7 @@ go_library( "//vendor/github.com/fsnotify/fsnotify:go_default_library", "//vendor/k8s.io/klog:go_default_library", 
"//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -60,7 +60,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/util/filesystem:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -72,6 +71,7 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS index 6caf661b0a6..549173db076 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS @@ -3,6 +3,7 @@ approvers: - chakri-nelluri - saad-ali +emeritus_approvers: - MikaelCluseau reviewers: - ivan4th @@ -14,4 +15,3 @@ reviewers: - jingxu97 - msau42 - chakri-nelluri -- MikaelCluseau diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go index 3c82f264b91..876ddc00b91 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go @@ -20,9 +20,9 @@ import ( "time" "k8s.io/klog" + "k8s.io/utils/mount" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go index 134f26da059..323164300b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go @@ -19,9 +19,10 @@ package flexvolume import ( "time" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + "k8s.io/apimachinery/pkg/types" ) type detacherDefaults flexVolumeDetacher diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go index 39ff9770436..6fe368b2437 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go @@ -20,9 +20,10 @@ import ( "fmt" "os" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go index 6eac275c9dc..189a40f1d62 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go @@ -52,7 +52,6 @@ const ( optionReadWrite = "kubernetes.io/readwrite" 
optionKeySecret = "kubernetes.io/secret" optionFSGroup = "kubernetes.io/mounterArgs.FsGroup" - optionMountsDir = "kubernetes.io/mountsDir" optionPVorVolumeName = "kubernetes.io/pvOrVolumeName" optionKeyPodName = "kubernetes.io/pod.name" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go index 10fccbf17e6..6ca9ba7dc55 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go @@ -26,10 +26,10 @@ import ( api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" + "k8s.io/utils/mount" utilstrings "k8s.io/utils/strings" ) @@ -148,10 +148,6 @@ func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool { return sourceDriver == plugin.driverName } -func (plugin *flexVolumePlugin) IsMigratedToCSI() bool { - return false -} - // RequiresRemount is part of the volume.VolumePlugin interface. func (plugin *flexVolumePlugin) RequiresRemount() bool { return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go index e3f05f0ec87..8eca3ad9748 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go @@ -18,7 +18,7 @@ package flexvolume import ( "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) type unmounterDefaults flexVolumeUnmounter diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go index 9435f2c2c46..7970cb77a2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go @@ -21,9 +21,10 @@ import ( "os" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume" "k8s.io/utils/exec" + "k8s.io/utils/mount" + + "k8s.io/kubernetes/pkg/volume" ) // FlexVolumeUnmounter is the disk that will be cleaned by this plugin. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go index 4a3019e25e3..c38b858e6f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go @@ -22,7 +22,8 @@ import ( "os" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go index 7e718f072a2..5f0c47542b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go @@ -17,10 +17,11 @@ limitations under the License. 
package flexvolume import ( + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - utilstrings "k8s.io/utils/strings" ) type flexVolume struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD index 2f6c2b70a21..276ada39eed 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD @@ -17,7 +17,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/flocker", deps = [ "//pkg/util/env:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -28,6 +27,7 @@ go_library( "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//vendor/github.com/clusterhq/flocker-go:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -41,7 +41,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -50,6 +49,7 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/clusterhq/flocker-go:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS index 0de8788a33e..e5d361af51a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS @@ -4,7 +4,6 @@ approvers: - rootfs - saad-ali reviewers: -- agonzalezro - simonswine - saad-ali - jsafrane diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go index c69a96776d0..723b28de4db 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go @@ -23,14 +23,14 @@ import ( "time" flockerapi "github.com/clusterhq/flocker-go" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" "k8s.io/kubernetes/pkg/util/env" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. 
@@ -107,10 +107,6 @@ func (p *flockerPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.Flocker != nil) } -func (p *flockerPlugin) IsMigratedToCSI() bool { - return false -} - func (p *flockerPlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD index 76ab8ca354f..cff495a05d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/gcepd", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -34,6 +33,7 @@ go_library( "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/path:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], @@ -49,7 +49,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -63,6 +62,7 @@ go_test( "//staging/src/k8s.io/cloud-provider/volume:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS index e4367f0e2ba..3076f706348 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS @@ -8,7 +8,6 @@ reviewers: - saad-ali - jsafrane - jingxu97 -- matchstick - gnufied - msau42 - verult diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go index 9d67f67f584..43c03564c59 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go @@ -29,11 +29,13 @@ import ( "strconv" "time" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/gce" @@ -179,10 +181,10 @@ func (attacher *gcePersistentDiskAttacher) BulkVerifyVolumes(volumesByNode map[t } // search Windows disk number by LUN -func getDiskID(pdName string, exec mount.Exec) (string, error) { +func getDiskID(pdName string, exec utilexec.Interface) (string, error) { // TODO: replace Get-GcePdName with native windows support of Get-Disk, see issue #74674 cmd := `Get-GcePdName | select Name, DeviceId | ConvertTo-Json` - output, err := exec.Run("powershell", "/c", cmd) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() if err != nil { klog.Errorf("Get-GcePdName failed, error: %v, 
output: %q", err, string(output)) err = errors.New(err.Error() + " " + string(output)) @@ -384,6 +386,14 @@ func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName ty } func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error { + if runtime.GOOS == "windows" { + // Flush data cache for windows because it does not do so automatically during unmount device + exec := detacher.host.GetExec(gcePersistentDiskPluginName) + err := volumeutil.WriteVolumeCache(deviceMountPath, exec) + if err != nil { + return err + } + } return mount.CleanupMountPoint(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName), false) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go index 56a70d22f59..a1f4eff61b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go @@ -27,19 +27,20 @@ import ( "strconv" "strings" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" gcecloud "k8s.io/legacy-cloud-providers/gce" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -100,11 +101,6 @@ func (plugin *gcePersistentDiskPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil) } -func (plugin *gcePersistentDiskPlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && - utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) -} - func (plugin *gcePersistentDiskPlugin) RequiresRemount() bool { return false } @@ -382,8 +378,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.Mounte notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) klog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly) if err != nil && !os.IsNotExist(err) { - klog.Errorf("cannot validate mount point: %s %v", dir, err) - return err + return fmt.Errorf("cannot validate mount point: %s %v", dir, err) } if !notMnt { return nil @@ -392,8 +387,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.Mounte if runtime.GOOS != "windows" { // in windows, we will use mklink to mount, will MkdirAll in Mount func if err := os.MkdirAll(dir, 0750); err != nil { - klog.Errorf("mkdir failed on disk %s (%v)", dir, err) - return err + return fmt.Errorf("mkdir failed on disk %s (%v)", dir, err) } } @@ -412,35 +406,32 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.Mounte if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err + return fmt.Errorf("failed to mount: %v. 
Cleanup IsLikelyNotMountPoint check failed: %v", err, mntErr) } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - klog.Errorf("Failed to unmount: %v", mntErr) - return err + return fmt.Errorf("failed to mount: %v. Cleanup failed to unmount: %v", err, mntErr) } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err + return fmt.Errorf("failed to mount: %v. Cleanup IsLikelyNotMountPoint check failed: %v", err, mntErr) } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) - return err + return fmt.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop", dir) } } - os.Remove(dir) - klog.Errorf("Mount of disk %s failed: %v", dir, err) - return err + mntErr = os.Remove(dir) + if mntErr != nil { + return fmt.Errorf("failed to mount: %v. Cleanup os Remove(%s) failed: %v", err, dir, mntErr) + } + + return fmt.Errorf("mount of disk %s failed: %v", dir, err) } if !b.readOnly { volume.SetVolumeOwnership(b, mounterArgs.FsGroup) } - - klog.V(4).Infof("Successfully mounted %s", dir) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go index 671f5d7178d..1f5be234734 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go @@ -23,14 +23,15 @@ import ( "path/filepath" "strconv" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) var _ volume.VolumePlugin = &gcePersistentDiskPlugin{} @@ -54,10 +55,10 @@ func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -68,6 +69,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock gceVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ @@ -132,10 +136,6 @@ func (plugin *gcePersistentDiskPlugin) newUnmapperInternal(volName string, podUI }}, nil } -func (c *gcePersistentDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { - return nil -} - type gcePersistentDiskUnmapper struct { *gcePersistentDisk } @@ -149,14 +149,6 @@ type gcePersistentDiskMapper struct { var _ 
volume.BlockVolumeMapper = &gcePersistentDiskMapper{} -func (b *gcePersistentDiskMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *gcePersistentDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/pdName func (pd *gcePersistentDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go index 146b208001d..ec2ab90db72 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go @@ -25,18 +25,19 @@ import ( "strings" "time" + "k8s.io/klog" + "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilpath "k8s.io/utils/path" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" cloudvolume "k8s.io/cloud-provider/volume" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" gcecloud "k8s.io/legacy-cloud-providers/gce" - "k8s.io/utils/exec" - utilpath "k8s.io/utils/path" ) const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go index 932296c2845..aee38f5bbf9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go @@ -77,10 +77,6 @@ func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.GitRepo != nil } -func (plugin *gitRepoPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *gitRepoPlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD index 5774416506f..a7b6e671934 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD @@ -17,7 +17,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/glusterfs", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -33,6 +32,7 @@ go_library( "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -45,7 +45,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -56,6 +55,7 @@ go_test( "//staging/src/k8s.io/client-go/testing:go_default_library", 
"//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index 52ff8cb1328..8067ba83344 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -20,6 +20,7 @@ import ( "fmt" "math" "math/rand" + "net" "os" "path/filepath" "runtime" @@ -29,7 +30,11 @@ import ( gcli "github.com/heketi/heketi/client/api/go-client" gapi "github.com/heketi/heketi/pkg/glusterfs/api" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,12 +44,9 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -88,13 +90,12 @@ const ( // default values, but they play a different role and // could take a different value. Only thing we need is: // absGidMin <= defGidMin <= defGidMax <= absGidMax - absoluteGidMin = 2000 - absoluteGidMax = math.MaxInt32 - linuxGlusterMountBinary = "mount.glusterfs" - heketiAnn = "heketi-dynamic-provisioner" - glusterTypeAnn = "gluster.org/type" - glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV" - heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id" + absoluteGidMin = 2000 + absoluteGidMax = math.MaxInt32 + heketiAnn = "heketi-dynamic-provisioner" + glusterTypeAnn = "gluster.org/type" + glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV" + heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id" // Error string returned by heketi errIDNotFound = "Id not found" @@ -118,10 +119,6 @@ func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.Glusterfs != nil) } -func (plugin *glusterfsPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *glusterfsPlugin) RequiresRemount() bool { return false } @@ -264,7 +261,7 @@ func (b *glusterfsMounter) CanMount() error { exe := b.plugin.host.GetExec(b.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - if _, err := exe.Run("test", "-x", gciLinuxGlusterMountBinaryPath); err != nil { + if _, err := exe.Command("test", "-x", gciLinuxGlusterMountBinaryPath).CombinedOutput(); err != nil { return fmt.Errorf("required binary %s is missing", gciLinuxGlusterMountBinaryPath) } } @@ -969,6 +966,11 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, return nil, fmt.Errorf("failed to get host ipaddress: %v", err) } ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "") + // IP validates if a string is a valid IP address. + ip := net.ParseIP(ipaddr) + if ip == nil { + return nil, fmt.Errorf("glusterfs server node ip address %s must be a valid IP address, (e.g. 
10.9.8.7)", ipaddr) + } dynamicHostIps = append(dynamicHostIps, ipaddr) } klog.V(3).Infof("host list :%v", dynamicHostIps) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/BUILD index 37cbe37ea01..0c2bd65bf3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/BUILD @@ -14,7 +14,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/hostpath", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -24,6 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go index ffc4e7d7bdf..af10f49ed54 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go @@ -21,11 +21,12 @@ import ( "os" "regexp" + "k8s.io/utils/mount" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" @@ -84,10 +85,6 @@ func (plugin *hostPathPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.HostPath != nil) } -func (plugin *hostPathPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *hostPathPlugin) RequiresRemount() bool { return false } @@ -489,7 +486,9 @@ func makeDir(pathname string) error { // If pathname already exists, whether a file or directory, no error is returned. 
func makeFile(pathname string) error { f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644)) - defer f.Close() + if f != nil { + f.Close() + } if err != nil { if !os.IsExist(err) { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD index 48208d1f705..60d8dd0b0e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/config:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -30,6 +29,7 @@ go_library( "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -42,7 +42,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -50,6 +49,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go index 5231414116c..7b3d439077e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go @@ -21,15 +21,16 @@ import ( "os" "time" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/keymutex" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/utils/keymutex" ) type iscsiAttacher struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index aff2045d7ff..dffd7050e46 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -20,7 +20,8 @@ import ( "os" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index 97fa17c0e18..957a90a331a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -23,16 +23,18 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + 
"k8s.io/utils/keymutex" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" ioutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - "k8s.io/utils/keymutex" - utilstrings "k8s.io/utils/strings" ) // This is the primary entrypoint for volume plugins. @@ -76,10 +78,6 @@ func (plugin *iscsiPlugin) CanSupport(spec *volume.Spec) bool { return (spec.Volume != nil && spec.Volume.ISCSI != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.ISCSI != nil) } -func (plugin *iscsiPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *iscsiPlugin) RequiresRemount() bool { return false } @@ -110,7 +108,7 @@ func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.V return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) } -func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) { +func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec utilexec.Interface, secret map[string]string) (volume.Mounter, error) { readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err @@ -153,7 +151,7 @@ func (plugin *iscsiPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, return plugin.newBlockVolumeMapperInternal(spec, uid, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) } -func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.BlockVolumeMapper, error) { +func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec utilexec.Interface, secret map[string]string) (volume.BlockVolumeMapper, error) { readOnly, _, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err @@ -174,7 +172,7 @@ func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volum return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) { +func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface, exec utilexec.Interface) (volume.Unmounter, error) { return &iscsiDiskUnmounter{ iscsiDisk: &iscsiDisk{ podUID: podUID, @@ -194,7 +192,7 @@ func (plugin *iscsiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.U return plugin.newUnmapperInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *iscsiPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager, exec mount.Exec) (volume.BlockVolumeUnmapper, error) { +func (plugin *iscsiPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager, 
exec utilexec.Interface) (volume.BlockVolumeUnmapper, error) { return &iscsiDiskUnmapper{ iscsiDisk: &iscsiDisk{ podUID: podUID, @@ -318,7 +316,7 @@ type iscsiDiskMounter struct { fsType string volumeMode v1.PersistentVolumeMode mounter *mount.SafeFormatAndMount - exec mount.Exec + exec utilexec.Interface deviceUtil ioutil.DeviceUtil mountOptions []string } @@ -356,7 +354,7 @@ func (b *iscsiDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) e type iscsiDiskUnmounter struct { *iscsiDisk mounter mount.Interface - exec mount.Exec + exec utilexec.Interface deviceUtil ioutil.DeviceUtil } @@ -376,27 +374,20 @@ func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { type iscsiDiskMapper struct { *iscsiDisk readOnly bool - exec mount.Exec + exec utilexec.Interface deviceUtil ioutil.DeviceUtil } var _ volume.BlockVolumeMapper = &iscsiDiskMapper{} -func (b *iscsiDiskMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (b *iscsiDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - type iscsiDiskUnmapper struct { *iscsiDisk - exec mount.Exec + exec utilexec.Interface deviceUtil ioutil.DeviceUtil } var _ volume.BlockVolumeUnmapper = &iscsiDiskUnmapper{} +var _ volume.CustomBlockVolumeUnmapper = &iscsiDiskUnmapper{} // Even though iSCSI plugin has attacher/detacher implementation, iSCSI plugin // needs volume detach operation during TearDownDevice(). This method is only @@ -415,6 +406,10 @@ func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error { return nil } +func (c *iscsiDiskUnmapper) UnmapPodDevice() error { + return nil +} + // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} func (iscsi *iscsiDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go index ebc0dee3903..f8742d9f716 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go @@ -18,24 +18,27 @@ package iscsi import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "path/filepath" "regexp" + "sort" "strconv" "strings" "time" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - utilexec "k8s.io/utils/exec" ) const ( @@ -86,15 +89,16 @@ func updateISCSIDiscoverydb(b iscsiDiskMounter, tp string) error { if !b.chapDiscovery { return nil } - out, err := b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", "discovery.sendtargets.auth.authmethod", "-v", "CHAP") + out, err := execWithLog(b, "iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", "discovery.sendtargets.auth.authmethod", "-v", "CHAP") if err != nil { - return fmt.Errorf("iscsi: failed to update discoverydb with CHAP, output: %v", string(out)) + return fmt.Errorf("iscsi: failed to update discoverydb with CHAP, 
output: %v", out) } for _, k := range chapSt { v := b.secret[k] if len(v) > 0 { - out, err := b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", k, "-v", v) + // explicitly not using execWithLog so secrets are not logged + out, err := b.exec.Command("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", k, "-v", v).CombinedOutput() if err != nil { return fmt.Errorf("iscsi: failed to update discoverydb key %q error: %v", k, string(out)) } @@ -108,15 +112,16 @@ func updateISCSINode(b iscsiDiskMounter, tp string) error { return nil } - out, err := b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "-o", "update", "-n", "node.session.auth.authmethod", "-v", "CHAP") + out, err := execWithLog(b, "iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "-o", "update", "-n", "node.session.auth.authmethod", "-v", "CHAP") if err != nil { - return fmt.Errorf("iscsi: failed to update node with CHAP, output: %v", string(out)) + return fmt.Errorf("iscsi: failed to update node with CHAP, output: %v", out) } for _, k := range chapSess { v := b.secret[k] if len(v) > 0 { - out, err := b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "-o", "update", "-n", k, "-v", v) + // explicitly not using execWithLog so secrets are not logged + out, err := b.exec.Command("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "-o", "update", "-n", k, "-v", v).CombinedOutput() if err != nil { return fmt.Errorf("iscsi: failed to update node session key %q error: %v", k, string(out)) } @@ -254,7 +259,7 @@ func waitForMultiPathToExist(devicePaths []string, maxRetries int, deviceUtil vo for i := 0; i < maxRetries; i++ { for _, path := range devicePaths { - // There shouldnt be any empty device paths. However adding this check + // There shouldn't be any empty device paths. However adding this check // for safer side to avoid the possibility of an empty entry. 
if path == "" { continue @@ -279,13 +284,13 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { var iscsiTransport string var lastErr error - out, err := b.exec.Run("iscsiadm", "-m", "iface", "-I", b.InitIface, "-o", "show") + out, err := execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.InitIface, "-o", "show") if err != nil { - klog.Errorf("iscsi: could not read iface %s error: %s", b.InitIface, string(out)) + klog.Errorf("iscsi: could not read iface %s error: %s", b.InitIface, out) return "", err } - iscsiTransport = extractTransportname(string(out)) + iscsiTransport = extractTransportname(out) bkpPortal := b.Portals @@ -323,7 +328,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { klog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp) // build discoverydb and discover iscsi target - b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new") + execWithLog(b, "iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new") // update discoverydb with CHAP secret err = updateISCSIDiscoverydb(b, tp) @@ -332,11 +337,11 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { continue } - out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover") + out, err = execWithLog(b, "iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover") if err != nil { // delete discoverydb record - b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete") - lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err) + execWithLog(b, "iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete") + lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, out, err) continue } @@ -348,16 +353,16 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { } // login to iscsi target - out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login") + out, err = execWithLog(b, "iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login") if err != nil { // delete the node record from database - b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete") - lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err) + execWithLog(b, "iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete") + lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", out, err) continue } // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot - out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual") + out, err = execWithLog(b, "iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual") if err != nil { // don't fail if we can't set startup mode, but log warning so there is a clue klog.Warningf("Warning: Failed to set iSCSI login mode to manual. 
Error: %v", err) @@ -400,9 +405,10 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { } if exist := waitForPathToExist(&devicePath, deviceDiscoveryTimeout, iscsiTransport); !exist { - klog.Errorf("Could not attach disk: Timeout after %ds", deviceDiscoveryTimeout) + msg := fmt.Sprintf("Timed out waiting for device at path %s after %ds", devicePath, deviceDiscoveryTimeout) + klog.Error(msg) // update last error - lastErr = fmt.Errorf("Could not attach disk: Timeout after %ds", deviceDiscoveryTimeout) + lastErr = errors.New(msg) continue } else { devicePaths[tp] = devicePath @@ -412,7 +418,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { if len(devicePaths) == 0 { // No path attached, report error and stop trying. kubelet will try again in a short while // delete cloned iface - b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") + execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") klog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) } @@ -557,7 +563,7 @@ func deleteDevices(c iscsiDiskUnmounter) error { } // Flush any multipath device maps for mpathDevice := range mpathDevices { - _, err = c.exec.Run("multipath", "-f", mpathDevice) + _, err = c.exec.Command("multipath", "-f", mpathDevice).CombinedOutput() if err != nil { klog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err) // Fall through -- keep deleting the block devices @@ -718,7 +724,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) return nil } -func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, iface, volName, initiatorName string, found bool) error { +func (util *ISCSIUtil) detachISCSIDisk(exec utilexec.Interface, portals []string, iqn, iface, volName, initiatorName string, found bool) error { for _, portal := range portals { logoutArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"} deleteArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"} @@ -727,7 +733,7 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i deleteArgs = append(deleteArgs, []string{"-I", iface}...) } klog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface) - out, err := exec.Run("iscsiadm", logoutArgs...) + out, err := exec.Command("iscsiadm", logoutArgs...).CombinedOutput() err = ignoreExitCodes(err, exit_ISCSI_ERR_NO_OBJS_FOUND, exit_ISCSI_ERR_SESS_NOT_FOUND) if err != nil { klog.Errorf("iscsi: failed to detach disk Error: %s", string(out)) @@ -735,7 +741,7 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i } // Delete the node record klog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn) - out, err = exec.Run("iscsiadm", deleteArgs...) 
+ out, err = exec.Command("iscsiadm", deleteArgs...).CombinedOutput() err = ignoreExitCodes(err, exit_ISCSI_ERR_NO_OBJS_FOUND, exit_ISCSI_ERR_SESS_NOT_FOUND) if err != nil { klog.Errorf("iscsi: failed to delete node record Error: %s", string(out)) @@ -746,7 +752,7 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i // If the iface is not created via iscsi plugin, skip to delete if initiatorName != "" && found && iface == (portals[0]+":"+volName) { deleteArgs := []string{"-m", "iface", "-I", iface, "-o", "delete"} - out, err := exec.Run("iscsiadm", deleteArgs...) + out, err := exec.Command("iscsiadm", deleteArgs...).CombinedOutput() err = ignoreExitCodes(err, exit_ISCSI_ERR_NO_OBJS_FOUND, exit_ISCSI_ERR_SESS_NOT_FOUND) if err != nil { klog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) @@ -792,7 +798,7 @@ func extractDeviceAndPrefix(mntPath string) (string, string, error) { func extractIface(mntPath string) (string, bool) { reOutput := ifaceRe.FindStringSubmatch(mntPath) - if reOutput != nil { + if reOutput != nil && len(reOutput) > 1 { return reOutput[1], true } @@ -856,36 +862,42 @@ func cloneIface(b iscsiDiskMounter) error { return fmt.Errorf("iscsi: cannot clone iface with same name: %s", b.InitIface) } // get pre-configured iface records - out, err := b.exec.Run("iscsiadm", "-m", "iface", "-I", b.InitIface, "-o", "show") + out, err := execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.InitIface, "-o", "show") if err != nil { - lastErr = fmt.Errorf("iscsi: failed to show iface records: %s (%v)", string(out), err) + lastErr = fmt.Errorf("iscsi: failed to show iface records: %s (%v)", out, err) return lastErr } // parse obtained records - params, err := parseIscsiadmShow(string(out)) + params, err := parseIscsiadmShow(out) if err != nil { - lastErr = fmt.Errorf("iscsi: failed to parse iface records: %s (%v)", string(out), err) + lastErr = fmt.Errorf("iscsi: failed to parse iface records: %s (%v)", out, err) return lastErr } // update initiatorname params["iface.initiatorname"] = b.InitiatorName // create new iface - out, err = b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "new") + out, err = execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "new") if err != nil { exit, ok := err.(utilexec.ExitError) if ok && exit.ExitStatus() == iscsiadmErrorSessExists { klog.Infof("iscsi: there is a session already logged in with iface %s", b.Iface) } else { - lastErr = fmt.Errorf("iscsi: failed to create new iface: %s (%v)", string(out), err) + lastErr = fmt.Errorf("iscsi: failed to create new iface: %s (%v)", out, err) return lastErr } } + // Get and sort keys to maintain a stable iteration order + var keys []string + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) // update new iface records - for key, val := range params { - _, err = b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "update", "-n", key, "-v", val) + for _, key := range keys { + _, err = execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "update", "-n", key, "-v", params[key]) if err != nil { - b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") - lastErr = fmt.Errorf("iscsi: failed to update iface records: %s (%v). iface(%s) will be used", string(out), err, b.InitIface) + execWithLog(b, "iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") + lastErr = fmt.Errorf("iscsi: failed to update iface records: %s (%v). 
iface(%s) will be used", out, err, b.InitIface) break } } @@ -958,3 +970,14 @@ func ignoreExitCodes(err error, ignoredExitCodes ...int) error { } return err } + +func execWithLog(b iscsiDiskMounter, cmd string, args ...string) (string, error) { + start := time.Now() + out, err := b.exec.Command(cmd, args...).CombinedOutput() + if klog.V(5) { + d := time.Since(start) + klog.V(5).Infof("Executed %s %v in %v, err: %v", cmd, args, d, err) + klog.V(5).Infof("Output: %s", string(out)) + } + return string(out), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD index 57cec77324c..1fcbe6c957f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD @@ -10,7 +10,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/events:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -21,6 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -33,8 +33,17 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util/hostutil:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], "@io_bazel_rules_go//go/platform:darwin": [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -42,9 +51,19 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util/hostutil:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -52,9 +71,9 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], 
"@io_bazel_rules_go//go/platform:windows": [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util/hostutil:go_default_library", @@ -62,6 +81,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index faef40d7afd..b79180a6c87 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -30,12 +30,12 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/kubelet/events" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/validation" "k8s.io/utils/keymutex" + "k8s.io/utils/mount" utilstrings "k8s.io/utils/strings" ) @@ -83,10 +83,6 @@ func (plugin *localVolumePlugin) CanSupport(spec *volume.Spec) bool { return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Local != nil) } -func (plugin *localVolumePlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *localVolumePlugin) RequiresRemount() bool { return false } @@ -191,6 +187,34 @@ func (plugin *localVolumePlugin) NewBlockVolumeUnmapper(volName string, // TODO: check if no path and no topology constraints are ok func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { fs := v1.PersistentVolumeFilesystem + // The main purpose of reconstructed volume is to clean unused mount points + // and directories. + // For filesystem volume with directory source, no global mount path is + // needed to clean. Empty path is ok. + // For filesystem volume with block source, we should resolve to its device + // path if global mount path exists. + var path string + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + refs, err := mounter.GetMountRefs(mountPath) + if err != nil { + return nil, err + } + baseMountPath := plugin.generateBlockDeviceBaseGlobalPath() + for _, ref := range refs { + if mount.PathWithinBase(ref, baseMountPath) { + // If the global mount for block device exists, the source is block + // device. + // The resolved device path may not be the exact same as path in + // local PV object if symbolic link is used. However, it's the true + // source and can be used in reconstructed volume. 
+ path, _, err = mount.GetDeviceNameFromMount(mounter, ref) + if err != nil { + return nil, err + } + klog.V(4).Infof("local: reconstructing volume %q (pod volume mount: %q) with device %q", volumeName, mountPath, path) + break + } + } localVolume := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: volumeName, @@ -198,7 +222,7 @@ func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath strin Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ - Path: "", + Path: path, }, }, VolumeMode: &fs, @@ -218,6 +242,7 @@ func (plugin *localVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volu Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ + // Not needed because we don't need to detach local device from the host. Path: "", }, }, @@ -583,16 +608,18 @@ type localVolumeMapper struct { } var _ volume.BlockVolumeMapper = &localVolumeMapper{} +var _ volume.CustomBlockVolumeMapper = &localVolumeMapper{} -// SetUpDevice provides physical device path for the local PV. -func (m *localVolumeMapper) SetUpDevice() (string, error) { - globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) - klog.V(4).Infof("SetupDevice returning path %s", globalPath) - return globalPath, nil +// SetUpDevice prepares the volume to the node by the plugin specific way. +func (m *localVolumeMapper) SetUpDevice() error { + return nil } -func (m *localVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) +// MapPodDevice provides physical device path for the local PV. +func (m *localVolumeMapper) MapPodDevice() (string, error) { + globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) + klog.V(4).Infof("MapPodDevice returning path %s", globalPath) + return globalPath, nil } // localVolumeUnmapper implements the BlockVolumeUnmapper interface for local volumes. @@ -602,12 +629,6 @@ type localVolumeUnmapper struct { var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{} -// TearDownDevice will undo SetUpDevice procedure. In local PV, all of this already handled by operation_generator. -func (u *localVolumeUnmapper) TearDownDevice(mapPath, _ string) error { - klog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath) - return nil -} - // GetGlobalMapPath returns global map path and error. 
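The localVolumeMapper hunks just above track a block-volume interface change that also shows up in the iscsi and rbd plugins in this patch: SetUpDevice no longer returns the device path, MapPodDevice takes over returning the global path, and the mapper/unmapper types now assert CustomBlockVolumeMapper/CustomBlockVolumeUnmapper. Below is a reduced sketch of the method shape implied by these hunks; the interface is a cut-down stand-in, not the full definition from k8s.io/kubernetes/pkg/volume.

package main

import "fmt"

// customBlockVolumeMapper is a cut-down stand-in for the interface the
// plugins now assert; the upstream interface carries additional methods.
type customBlockVolumeMapper interface {
	SetUpDevice() error            // previously returned (string, error)
	MapPodDevice() (string, error) // now returns the global device path
}

type localVolumeMapper struct {
	globalPath string
}

func (m *localVolumeMapper) SetUpDevice() error {
	// Nothing to prepare for a local volume; the path is resolved in MapPodDevice.
	return nil
}

func (m *localVolumeMapper) MapPodDevice() (string, error) {
	return m.globalPath, nil
}

func main() {
	var mapper customBlockVolumeMapper = &localVolumeMapper{globalPath: "/dev/disk/by-id/example"}
	if err := mapper.SetUpDevice(); err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	path, _ := mapper.MapPodDevice()
	fmt.Println("pod device path:", path)
}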
// path: plugins/kubernetes.io/kubernetes.io/local-volume/volumeDevices/{volumeName} func (l *localVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD index 42baf1177e3..624a7898a2e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD @@ -14,7 +14,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/nfs", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/recyclerclient:go_default_library", @@ -22,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -31,7 +31,6 @@ go_test( srcs = ["nfs_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -39,6 +38,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go index caf52d56ef0..41b2e1dd9bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go @@ -21,15 +21,16 @@ import ( "os" "runtime" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" - utilstrings "k8s.io/utils/strings" ) func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -89,10 +90,6 @@ func (plugin *nfsPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.NFS != nil) } -func (plugin *nfsPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *nfsPlugin) RequiresRemount() bool { return false } @@ -206,15 +203,15 @@ func (nfsMounter *nfsMounter) CanMount() error { exec := nfsMounter.plugin.host.GetExec(nfsMounter.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - if _, err := exec.Run("test", "-x", "/sbin/mount.nfs"); err != nil { + if _, err := exec.Command("test", "-x", "/sbin/mount.nfs").CombinedOutput(); err != nil { return fmt.Errorf("Required binary /sbin/mount.nfs is missing") } - if _, err := exec.Run("test", "-x", "/sbin/mount.nfs4"); err != nil { + if _, err := exec.Command("test", "-x", "/sbin/mount.nfs4").CombinedOutput(); err != nil { return fmt.Errorf("Required binary /sbin/mount.nfs4 is missing") } return nil case "darwin": - if _, err := exec.Run("test", "-x", "/sbin/mount_nfs"); err != nil { + if _, err := exec.Command("test", "-x", 
"/sbin/mount_nfs").CombinedOutput(); err != nil { return fmt.Errorf("Required binary /sbin/mount_nfs is missing") } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go index e4052d31387..3d3d5e1dfd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go @@ -48,10 +48,6 @@ func (n *noopExpandableVolumePluginInstance) CanSupport(spec *Spec) bool { return true } -func (n *noopExpandableVolumePluginInstance) IsMigratedToCSI() bool { - return false -} - func (n *noopExpandableVolumePluginInstance) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 406168e9055..7393c682e1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -22,6 +22,10 @@ import ( "strings" "sync" + "k8s.io/klog" + "k8s.io/utils/exec" + "k8s.io/utils/mount" + authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -32,13 +36,12 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + storagelistersv1 "k8s.io/client-go/listers/storage/v1" storagelisters "k8s.io/client-go/listers/storage/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" "k8s.io/kubernetes/pkg/volume/util/subpath" @@ -156,10 +159,6 @@ type VolumePlugin interface { // const. CanSupport(spec *Spec) bool - // IsMigratedToCSI tests whether a CSIDriver implements this plugin's - // functionality - IsMigratedToCSI() bool - // RequiresRemount returns true if this plugin requires mount calls to be // reexecuted. Atomically updating volumes, like Downward API, depend on // this to update the contents of the volume. @@ -350,7 +349,7 @@ type KubeletVolumeHost interface { // to access methods on the Attach Detach Controller. type AttachDetachVolumeHost interface { // CSINodeLister returns the informer lister for the CSINode API Object - CSINodeLister() storagelisters.CSINodeLister + CSINodeLister() storagelistersv1.CSINodeLister // CSIDriverLister returns the informer lister for the CSIDriver API Object CSIDriverLister() storagelisters.CSIDriverLister @@ -436,7 +435,7 @@ type VolumeHost interface { DeleteServiceAccountTokenFunc() func(podUID types.UID) // Returns an interface that should be used to execute any utilities in volume plugins - GetExec(pluginName string) mount.Exec + GetExec(pluginName string) exec.Interface // Returns the labels on the node GetNodeLabels() (map[string]string, error) @@ -690,39 +689,6 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) { return matches[0], nil } -// IsPluginMigratableBySpec looks for a plugin that can support a given volume -// specification and whether that plugin is Migratable. If no plugins can -// support or more than one plugin can support it, return error. 
-func (pm *VolumePluginMgr) IsPluginMigratableBySpec(spec *Spec) (bool, error) { - pm.mutex.Lock() - defer pm.mutex.Unlock() - - if spec == nil { - return false, fmt.Errorf("could not find if plugin is migratable because volume spec is nil") - } - - matches := []VolumePlugin{} - for _, v := range pm.plugins { - if v.CanSupport(spec) { - matches = append(matches, v) - } - } - - if len(matches) == 0 { - // Not a known plugin (flex) in which case it is not migratable - return false, nil - } - if len(matches) > 1 { - matchedPluginNames := []string{} - for _, plugin := range matches { - matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName()) - } - return false, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ",")) - } - - return matches[0].IsMigratedToCSI(), nil -} - // FindPluginByName fetches a plugin by name or by legacy name. If no plugin // is found, returns error. func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD index 6925d5f355e..06f8c232f02 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD @@ -11,13 +11,13 @@ go_test( srcs = ["portworx_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -31,7 +31,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/portworx", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -45,6 +44,7 @@ go_library( "//vendor/github.com/libopenstorage/openstorage/api/spec:go_default_library", "//vendor/github.com/libopenstorage/openstorage/volume:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index c9c30f4bd49..da6cfbe7088 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -21,15 +21,16 @@ import ( "os" volumeclient "github.com/libopenstorage/openstorage/api/client/volume" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) const ( @@ -95,10 +96,6 @@ func (plugin *portworxVolumePlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && 
spec.Volume.PortworxVolume != nil) } -func (plugin *portworxVolumePlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *portworxVolumePlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go index 7aaee7138ac..65e1ac5e2f1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go @@ -95,10 +95,6 @@ func (plugin *projectedPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.Projected != nil } -func (plugin *projectedPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *projectedPlugin) RequiresRemount() bool { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD index f194dfe9e1b..60bc5a55ab1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD @@ -15,7 +15,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/quobyte", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -23,9 +22,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/github.com/google/uuid:go_default_library", "//vendor/github.com/quobyte/api:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -35,7 +35,6 @@ go_test( srcs = ["quobyte_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -43,6 +42,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go index 3cfc7a800ec..33d730da522 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go @@ -22,16 +22,17 @@ import ( "path/filepath" gostrings "strings" - "github.com/pborman/uuid" - "k8s.io/api/core/v1" + "github.com/google/uuid" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. 
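The quobyte BUILD and import hunks above swap github.com/pborman/uuid for github.com/google/uuid, and the provisioner hunk that follows builds the dynamic volume name with uuid.New().String() instead of uuid.NewUUID(). A minimal sketch of the new call; the volume-name prefix is the one used in the diff, everything else is illustrative.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// uuid.New() returns a random (version 4) UUID; String() renders it in
	// the canonical textual form used for the provisioned volume name.
	name := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.New().String())
	fmt.Println(name)
}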
@@ -102,7 +103,7 @@ func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool { } exec := plugin.host.GetExec(plugin.GetPluginName()) - if out, err := exec.Run("ls", "/sbin/mount.quobyte"); err == nil { + if out, err := exec.Command("ls", "/sbin/mount.quobyte").CombinedOutput(); err == nil { klog.V(4).Infof("quobyte: can support: %s", string(out)) return true } @@ -110,10 +111,6 @@ func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool { return false } -func (plugin *quobytePlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *quobytePlugin) RequiresRemount() bool { return false } @@ -408,7 +405,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision(selectedNode *v1.Node, al } // create random image name - provisioner.volume = fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID()) + provisioner.volume = fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.New().String()) manager := &quobyteVolumeManager{ config: cfg, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go index 69925b4c7c1..60c6f6c031f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go @@ -39,7 +39,7 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv if err != nil { return nil, 0, err } - // Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited) + // Quobyte has the concept of Volumes which doesn't have a specific size (they can grow unlimited) // to simulate a size constraint we set here a Quota for logical space volumeRequest := &quobyteapi.CreateVolumeRequest{ Name: provisioner.volume, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD index 94917607158..09226ab256b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/rbd", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -35,6 +34,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/path:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], @@ -45,7 +46,6 @@ go_test( srcs = ["rbd_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -55,6 +55,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go index 7dbb24b6685..12c2e77c935 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go @@ -21,10 +21,11 @@ import ( "os" "time" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index 93d5f81a141..5c10332606e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -26,10 +26,11 @@ import ( "fmt" "os" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index cd953db711c..a07fe8bf06d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -23,7 +23,12 @@ import ( "regexp" dstrings "strings" - "k8s.io/api/core/v1" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -31,14 +36,11 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" volutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) var ( @@ -72,7 +74,6 @@ const ( rbdDefaultAdminId = "admin" rbdDefaultAdminSecretNamespace = "default" rbdDefaultPool = "rbd" - rbdDefaultUserId = rbdDefaultAdminId ) func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -108,10 +109,6 @@ func (plugin *rbdPlugin) CanSupport(spec *volume.Spec) bool { return (spec.Volume != nil && spec.Volume.RBD != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD != nil) } -func (plugin *rbdPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *rbdPlugin) RequiresRemount() bool { return false } @@ -503,7 +500,7 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ return plugin.newBlockVolumeMapperInternal(spec, uid, &RBDUtil{}, secret, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *rbdPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret string, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) { +func (plugin *rbdPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret 
string, mounter mount.Interface, exec utilexec.Interface) (volume.BlockVolumeMapper, error) { mon, err := getVolumeSourceMonitors(spec) if err != nil { return nil, err @@ -771,7 +768,7 @@ type rbd struct { ReadOnly bool plugin *rbdPlugin mounter *mount.SafeFormatAndMount - exec mount.Exec + exec utilexec.Interface // Utility interface that provides API calls to the provider to attach/detach disks. manager diskManager volume.MetricsProvider `json:"-"` @@ -898,6 +895,7 @@ type rbdDiskMapper struct { } var _ volume.BlockVolumeUnmapper = &rbdDiskUnmapper{} +var _ volume.CustomBlockVolumeUnmapper = &rbdDiskUnmapper{} // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/{rbd pool}-image-{rbd image-name}/{podUid} @@ -912,14 +910,6 @@ func (rbd *rbd) GetPodDeviceMapPath() (string, string) { return rbd.rbdPodDeviceMapPath() } -func (rbd *rbdDiskMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (rbd *rbdDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return volutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - func (rbd *rbd) rbdGlobalMapPath(spec *volume.Spec) (string, error) { mon, err := getVolumeSourceMonitors(spec) if err != nil { @@ -1003,6 +993,10 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error { return nil } +func (rbd *rbdDiskUnmapper) UnmapPodDevice() error { + return nil +} + func getVolumeSourceMonitors(spec *volume.Spec) ([]string, error) { if spec.Volume != nil && spec.Volume.RBD != nil { return spec.Volume.RBD.CephMonitors, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index 69ea75e429d..85044e85fde 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -32,22 +32,23 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilpath "k8s.io/utils/path" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" - utilpath "k8s.io/utils/path" ) const ( imageWatcherStr = "watcher=" - imageSizeStr = "size " kubeLockMagic = "kubelet_lock_magic_" // The following three values are used for 30 seconds timeout // while waiting for RBD Watcher to expire. @@ -228,22 +229,22 @@ func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) { // do not change this format - some tools like rbd-nbd are strict about it. imgPath := fmt.Sprintf("%s/%s", b.Pool, b.Image) if b.Secret != "" { - return b.exec.Run(rbdCmd, - "map", imgPath, "--id", b.Id, "-m", mon, "--key="+b.Secret) + return b.exec.Command(rbdCmd, + "map", imgPath, "--id", b.Id, "-m", mon, "--key="+b.Secret).CombinedOutput() } else { - return b.exec.Run(rbdCmd, - "map", imgPath, "--id", b.Id, "-m", mon, "-k", b.Keyring) + return b.exec.Command(rbdCmd, + "map", imgPath, "--id", b.Id, "-m", mon, "-k", b.Keyring).CombinedOutput() } } // Check if rbd-nbd tools are installed. 
-func checkRbdNbdTools(e mount.Exec) bool { - _, err := e.Run("modprobe", "nbd") +func checkRbdNbdTools(e utilexec.Interface) bool { + _, err := e.Command("modprobe", "nbd").CombinedOutput() if err != nil { klog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err) return false } - if _, err := e.Run("rbd-nbd", "--version"); err != nil { + if _, err := e.Command("rbd-nbd", "--version").CombinedOutput(); err != nil { klog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err) return false } @@ -336,7 +337,7 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { // Get the locker name, something like "client.1234". args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) + cmd, err = b.exec.Command("rbd", args...).CombinedOutput() output = string(cmd) klog.V(4).Infof("lock list output %q", output) if err != nil { @@ -354,7 +355,7 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { if len(locker) > 0 { args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) + cmd, err = b.exec.Command("rbd", args...).CombinedOutput() if err == nil { klog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) } else { @@ -433,7 +434,7 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { mon := util.kernelRBDMonitorsOpt(b.Mon) klog.V(1).Infof("rbd: map mon %s", mon) - _, err := b.exec.Run("modprobe", "rbd") + _, err := b.exec.Command("modprobe", "rbd").CombinedOutput() if err != nil { klog.Warningf("rbd: failed to load rbd kernel module:%v", err) } @@ -484,7 +485,7 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic } // rbd unmap - output, err := exec.Run(rbdCmd, "unmap", device) + output, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput() if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output)) } @@ -543,7 +544,7 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error } // rbd unmap - output, err := exec.Run(rbdCmd, "unmap", device) + output, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput() if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output))) } @@ -570,10 +571,6 @@ func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { return fmt.Errorf("rbd: decode err: %v.", err) } - if err != nil { - klog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) - return err - } // Remove rbd lock if found. // The disk is not attached to this node anymore, so the lock on image // for this node can be removed safely. @@ -606,7 +603,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo features := strings.Join(p.rbdMounter.imageFeatures, ",") args = append(args, "--image-feature", features) } - output, err = p.exec.Run("rbd", args...) + output, err = p.exec.Command("rbd", args...).CombinedOutput() if err != nil { klog.Warningf("failed to create rbd image, output %v", string(output)) @@ -633,8 +630,8 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { // rbd rm. 
mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon) klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) - output, err = p.exec.Run("rbd", - "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret) + output, err = p.exec.Command("rbd", + "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret).CombinedOutput() if err == nil { return nil } @@ -665,8 +662,8 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc // rbd resize. mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon) klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) - output, err = rbdExpander.exec.Run("rbd", - "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) + output, err = rbdExpander.exec.Command("rbd", + "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret).CombinedOutput() if err == nil { return newSizeQuant, nil } @@ -707,8 +704,8 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) { // rbd: error opening image 1234: (2) No such file or directory // klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) - output, err = b.exec.Run("rbd", - "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret, "--format=json") + output, err = b.exec.Command("rbd", + "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret, "-k=/dev/null", "--format=json").CombinedOutput() if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { @@ -770,8 +767,8 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory // klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) - cmd, err = b.exec.Run("rbd", - "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) + cmd, err = b.exec.Command("rbd", + "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret).CombinedOutput() output = string(cmd) if err, ok := err.(*exec.Error); ok { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD index c9b54e12734..f1410f50657 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD @@ -15,7 +15,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -26,6 +25,7 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/thecodeteam/goscaleio/types/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", + 
"//vendor/k8s.io/utils/exec/testing:go_default_library", ], ) @@ -40,7 +40,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/scaleio", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -52,7 +51,9 @@ go_library( "//vendor/github.com/thecodeteam/goscaleio:go_default_library", "//vendor/github.com/thecodeteam/goscaleio/types/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go index bc9b9868f7b..443a56f178c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/util/mount" + utilexec "k8s.io/utils/exec" sio "github.com/thecodeteam/goscaleio" siotypes "github.com/thecodeteam/goscaleio/types/v1" @@ -78,10 +78,10 @@ type sioClient struct { inited bool diskRegex *regexp.Regexp mtx sync.Mutex - exec mount.Exec + exec utilexec.Interface } -func newSioClient(gateway, username, password string, sslEnabled bool, exec mount.Exec) (*sioClient, error) { +func newSioClient(gateway, username, password string, sslEnabled bool, exec utilexec.Interface) (*sioClient, error) { client := new(sioClient) client.gateway = gateway client.username = username @@ -321,7 +321,7 @@ func (c *sioClient) getGUID() (string, error) { if c.sdcGUID == "" { klog.V(4).Info(log("sdc guid label not set, falling back to using drv_cfg")) cmd := c.getSdcCmd() - output, err := c.exec.Run(cmd, "--query_guid") + output, err := c.exec.Command(cmd, "--query_guid").CombinedOutput() if err != nil { klog.Error(log("drv_cfg --query_guid failed: %v", err)) return "", err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go index e63b58da188..fa90311c5a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go @@ -20,9 +20,8 @@ import ( "errors" "strconv" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/klog" + utilexec "k8s.io/utils/exec" siotypes "github.com/thecodeteam/goscaleio/types/v1" ) @@ -30,10 +29,10 @@ import ( type sioMgr struct { client sioInterface configData map[string]string - exec mount.Exec + exec utilexec.Interface } -func newSioMgr(configs map[string]string, exec mount.Exec) (*sioMgr, error) { +func newSioMgr(configs map[string]string, exec utilexec.Interface) (*sioMgr, error) { if configs == nil { return nil, errors.New("missing configuration data") } @@ -130,7 +129,7 @@ func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, er // handle vol if already attached if len(vol.MappedSdcInfo) > 0 { if m.isSdcMappedToVol(iid, vol) { - klog.V(4).Info(log("skippping attachment, volume %s already attached to sdc %s", volName, iid)) + klog.V(4).Info(log("skipping attachment, volume %s already attached to sdc %s", volName, iid)) return devs[vol.ID], nil } } diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go index 7b045376dd5..b1dd18776d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go @@ -72,10 +72,6 @@ func (p *sioPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.ScaleIO != nil) } -func (p *sioPlugin) IsMigratedToCSI() bool { - return false -} - func (p *sioPlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go index 67bc549b9b9..9904147be53 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go @@ -23,17 +23,18 @@ import ( "strconv" "strings" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) type sioVolume struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD index 288808826ff..cb168c9532d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD @@ -14,7 +14,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/secret", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -22,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS index 93c0d5e773d..8ef14a0c2a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS @@ -4,6 +4,7 @@ approvers: - pmorie - saad-ali - thockin +emeritus_approvers: - matchstick reviewers: - ivan4th diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index 90345d3f96a..3eee6d91827 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -19,15 +19,16 @@ package secret import ( "fmt" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - 
"k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. @@ -80,10 +81,6 @@ func (plugin *secretPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.Secret != nil } -func (plugin *secretPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *secretPlugin) RequiresRemount() bool { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD index db9aeeb6dec..8d1b5730cbe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD @@ -15,7 +15,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/storageos", deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -27,6 +26,8 @@ go_library( "//vendor/github.com/storageos/go-api:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -39,7 +40,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -48,6 +48,8 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/OWNERS deleted file mode 100644 index 7fa4df36eb1..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -maintainers: -- croomes -- rusenask -- chira001 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go index a126d50799a..a90e384e9a6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go @@ -23,17 +23,19 @@ import ( "path/filepath" "strings" - "k8s.io/api/core/v1" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. 
@@ -90,10 +92,6 @@ func (plugin *storageosPlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.StorageOS != nil) } -func (plugin *storageosPlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *storageosPlugin) RequiresRemount() bool { return false } @@ -115,7 +113,7 @@ func (plugin *storageosPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu return plugin.newMounterInternal(spec, pod, apiCfg, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, apiCfg *storageosAPIConfig, manager storageosManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) { +func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, apiCfg *storageosAPIConfig, manager storageosManager, mounter mount.Interface, exec utilexec.Interface) (volume.Mounter, error) { volName, volNamespace, fsType, readOnly, err := getVolumeInfoFromSpec(spec) if err != nil { @@ -147,7 +145,7 @@ func (plugin *storageosPlugin) NewUnmounter(pvName string, podUID types.UID) (vo return plugin.newUnmounterInternal(pvName, podUID, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } -func (plugin *storageosPlugin) newUnmounterInternal(pvName string, podUID types.UID, manager storageosManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) { +func (plugin *storageosPlugin) newUnmounterInternal(pvName string, podUID types.UID, manager storageosManager, mounter mount.Interface, exec utilexec.Interface) (volume.Unmounter, error) { // Parse volume namespace & name from mountpoint if mounted volNamespace, volName, err := getVolumeInfo(pvName, podUID, plugin.host) @@ -311,7 +309,7 @@ type storageos struct { apiCfg *storageosAPIConfig manager storageosManager mounter mount.Interface - exec mount.Exec + exec utilexec.Interface plugin *storageosPlugin volume.MetricsProvider } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go index c7f430ea5d4..05dcf039c14 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go @@ -23,11 +23,10 @@ import ( "path/filepath" "strings" - "k8s.io/kubernetes/pkg/util/mount" - storageosapi "github.com/storageos/go-api" storageostypes "github.com/storageos/go-api/types" "k8s.io/klog" + utilexec "k8s.io/utils/exec" ) const ( @@ -331,7 +330,7 @@ func pathDeviceType(path string) (deviceType, error) { // attachFileDevice takes a path to a regular file and makes it available as an // attached block device. -func attachFileDevice(path string, exec mount.Exec) (string, error) { +func attachFileDevice(path string, exec utilexec.Interface) (string, error) { blockDevicePath, err := getLoopDevice(path, exec) if err != nil && err.Error() != ErrDeviceNotFound { return "", err @@ -349,7 +348,7 @@ func attachFileDevice(path string, exec mount.Exec) (string, error) { } // Returns the full path to the loop device associated with the given path. 
-func getLoopDevice(path string, exec mount.Exec) (string, error) { +func getLoopDevice(path string, exec utilexec.Interface) (string, error) { _, err := os.Stat(path) if os.IsNotExist(err) { return "", errors.New(ErrNotAvailable) @@ -359,7 +358,7 @@ func getLoopDevice(path string, exec mount.Exec) (string, error) { } args := []string{"-j", path} - out, err := exec.Run(losetupPath, args...) + out, err := exec.Command(losetupPath, args...).CombinedOutput() if err != nil { klog.V(2).Infof("Failed device discover command for path %s: %v", path, err) return "", err @@ -367,9 +366,9 @@ func getLoopDevice(path string, exec mount.Exec) (string, error) { return parseLosetupOutputForDevice(out) } -func makeLoopDevice(path string, exec mount.Exec) (string, error) { +func makeLoopDevice(path string, exec utilexec.Interface) (string, error) { args := []string{"-f", "-P", "--show", path} - out, err := exec.Run(losetupPath, args...) + out, err := exec.Command(losetupPath, args...).CombinedOutput() if err != nil { klog.V(2).Infof("Failed device create command for path %s: %v", path, err) return "", err @@ -377,9 +376,9 @@ func makeLoopDevice(path string, exec mount.Exec) (string, error) { return parseLosetupOutputForDevice(out) } -func removeLoopDevice(device string, exec mount.Exec) error { +func removeLoopDevice(device string, exec utilexec.Interface) error { args := []string{"-d", device} - out, err := exec.Run(losetupPath, args...) + out, err := exec.Command(losetupPath, args...).CombinedOutput() if err != nil { if !strings.Contains(string(out), "No such device or address") { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index 55d9a2ecfbc..5e279f5173a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -23,7 +23,6 @@ go_library( "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/resizefs:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util/types:go_default_library", @@ -44,6 +43,8 @@ go_library( "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -73,6 +74,9 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/client-go/util/testing:go_default_library", ], @@ -91,7 +95,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/volume/util/exec:all-srcs", "//pkg/volume/util/fs:all-srcs", "//pkg/volume/util/fsquota:all-srcs", "//pkg/volume/util/hostutil:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS index 9485dfe9514..937adb56c50 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS @@ -6,4 +6,3 @@ reviewers: - saad-ali - rootfs - jingxu97 -- screeley44 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index 125aea2f48d..ef1939e7c9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -304,7 +304,7 @@ func shouldWriteFile(path string, content []byte) (bool, error) { return false, err } - return (bytes.Compare(content, contentOnFs) != 0), nil + return !bytes.Equal(content, contentOnFs), nil } // pathsToRemove walks the current version of the data directory and diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/BUILD deleted file mode 100644 index 5f4fd655ac6..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/BUILD +++ /dev/null @@ -1,74 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "exec_mount.go", - "exec_mount_unsupported.go", - ], - importpath = "k8s.io/kubernetes/pkg/volume/util/exec", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//pkg/util/mount:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//pkg/util/mount:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = ["exec_mount_test.go"], - embed = [":go_default_library"], - deps = select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount.go deleted file mode 100644 index 868f989ecea..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build linux - -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package exec - -import ( - "fmt" - - "k8s.io/klog" - - "k8s.io/kubernetes/pkg/util/mount" -) - -// ExecMounter is a mounter that uses provided Exec interface to mount and -// unmount a filesystem. For all other calls it uses a wrapped mounter. -type execMounter struct { - wrappedMounter mount.Interface - exec mount.Exec -} - -// NewExecMounter returns a mounter that uses provided Exec interface to mount and -// unmount a filesystem. For all other calls it uses a wrapped mounter. -func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { - return &execMounter{ - wrappedMounter: wrapped, - exec: exec, - } -} - -// execMounter implements mount.Interface -var _ mount.Interface = &execMounter{} - -// Mount runs mount(8) using given exec interface. -func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindOpts, bindRemountOpts := mount.MakeBindOpts(options) - - if bind { - err := m.doExecMount(source, target, fstype, bindOpts) - if err != nil { - return err - } - return m.doExecMount(source, target, fstype, bindRemountOpts) - } - - return m.doExecMount(source, target, fstype, options) -} - -// doExecMount calls exec(mount ) using given exec interface. -func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { - klog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) - mountArgs := mount.MakeMountArgs(source, target, fstype, options) - output, err := m.exec.Run("mount", mountArgs...) - klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) - if err != nil { - return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s", - err, "mount", source, target, fstype, options, string(output)) - } - - return err -} - -// Unmount runs umount(8) using given exec interface. -func (m *execMounter) Unmount(target string) error { - outputBytes, err := m.exec.Run("umount", target) - if err == nil { - klog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) - } else { - klog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) - } - - return err -} - -// List returns a list of all mounted filesystems. -func (m *execMounter) List() ([]mount.MountPoint, error) { - return m.wrappedMounter.List() -} - -// IsLikelyNotMountPoint determines whether a path is a mountpoint. 
-func (m *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { - return m.wrappedMounter.IsLikelyNotMountPoint(file) -} - -func (m *execMounter) GetMountRefs(pathname string) ([]string, error) { - return m.wrappedMounter.GetMountRefs(pathname) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount_unsupported.go deleted file mode 100644 index 1c4f8ecf38c..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/exec/exec_mount_unsupported.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build !linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package exec - -import ( - "errors" - - "k8s.io/kubernetes/pkg/util/mount" -) - -type execMounter struct{} - -var _ = mount.Interface(&execMounter{}) - -// NewExecMounter returns a mounter that uses provided Exec interface to mount and -// unmount a filesystem. For all other calls it uses a wrapped mounter. -func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { - return &execMounter{} -} - -func (mounter *execMounter) Mount(source string, target string, fstype string, options []string) error { - return nil -} - -func (mounter *execMounter) Unmount(target string) error { - return nil -} - -func (mounter *execMounter) List() ([]mount.MountPoint, error) { - return []mount.MountPoint{}, nil -} - -func (mounter *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { - return true, nil -} - -func (mounter *execMounter) GetMountRefs(pathname string) ([]string, error) { - return nil, errors.New("not implemented") -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/BUILD index 0193ae830f9..07da6389c2b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/BUILD @@ -11,7 +11,9 @@ go_library( visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:android": [ + "//pkg/volume/util/fsquota:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/volume/util/fsquota:go_default_library", @@ -24,6 +26,11 @@ go_library( "@io_bazel_rules_go//go/platform:freebsd": [ "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/volume/util/fsquota:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/volume/util/fsquota:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go index 21fb3a21e15..2796e93d19e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go @@ -68,9 +68,9 @@ func DiskUsage(path string) (*resource.Quantity, error) { } // Uses the same niceness level as cadvisor.fs does when running du // Uses -B 1 to always scale to a blocksize of 1 byte - out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", path).CombinedOutput() + out, err := exec.Command("nice", "-n", "19", "du", "-x", "-s", "-B", "1", path).CombinedOutput() if err != nil { - return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -s -B 1) on path %s with error %v", path, err) + return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -x -s -B 1) on path %s with error %v", path, err) } used, err := resource.ParseQuantity(strings.Fields(string(out))[0]) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/BUILD index b10fa34e5a5..523b56debbc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/BUILD @@ -12,11 +12,17 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/volume/util/fsquota/common:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/volume/util/fsquota/common:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", @@ -32,14 +38,23 @@ go_test( srcs = ["quota_linux_test.go"], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/features:go_default_library", + "//pkg/volume/util/fsquota/common:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume/util/fsquota/common:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/BUILD 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/BUILD index 78f219d7fe9..60d8b5bfc43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/BUILD @@ -9,6 +9,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/util/fsquota/common", visibility = ["//visibility:public"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/klog:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota.go index 877b8fe9d28..40e30d5d66c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota.go @@ -17,11 +17,12 @@ limitations under the License. package fsquota import ( + "k8s.io/utils/mount" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" ) // Interface -- quota interface diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go index 65c24c52070..70de9c309e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go @@ -25,11 +25,12 @@ import ( "path/filepath" "sync" + "k8s.io/klog" + "k8s.io/utils/mount" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume/util/fsquota/common" ) @@ -110,7 +111,7 @@ func clearBackingDev(path string) { // Breaking this up helps with testing func detectMountpointInternal(m mount.Interface, path string) (string, error) { for path != "" && path != "/" { - // per pkg/util/mount/mount_linux this detects all but + // per k8s.io/utils/mount/mount_linux this detects all but // a bind mount from one part of a mount to another. 
// For our purposes that's fine; we simply want the "true" // mount point diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go index f1ae4cdd1f4..a192122b712 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go @@ -20,9 +20,11 @@ package fsquota import ( "errors" + + "k8s.io/utils/mount" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/util/mount" ) // Dummy quota implementation for systems that do not implement support diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/BUILD index 153d6a1e5d3..71cb37e5913 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/BUILD @@ -12,8 +12,13 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/util/hostutil", visibility = ["//visibility:public"], deps = [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -35,6 +40,9 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go index d098e91a1b5..688e28ecc87 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go @@ -21,7 +21,7 @@ import ( "os" "sync" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // FakeHostUtil is a fake HostUtils implementation for testing diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go index 68e5c7e999c..ceb067dbf04 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // FileType enumerates the known set of possible file types. 
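The fsquota and hostutil hunks above, like the secret, scaleio, and storageos BUILD files earlier, replace the in-tree k8s.io/kubernetes/pkg/util/mount import with the vendored k8s.io/utils/mount package; the mount.Interface surface used here (List, IsLikelyNotMountPoint, GetMountRefs, and so on) carries over, so call sites only change their import path. A small sketch of the new import in isolation, assuming k8s.io/utils/mount exposes the same New constructor as the old package:

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	// mount.New("") returns the platform-default mounter; the empty string
	// means "use the mount binary found on PATH", as in the old package.
	mounter := mount.New("")

	// IsLikelyNotMountPoint keeps its old semantics: it cannot detect bind
	// mounts, which is what the detectMountpointInternal comment above
	// refers to.
	notMnt, err := mounter.IsLikelyNotMountPoint("/var/lib/kubelet")
	fmt.Println(notMnt, err)

	mps, err := mounter.List()
	fmt.Println(len(mps), err)
}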
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go index 87979911fd9..8e7efc129fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go @@ -27,11 +27,9 @@ import ( "syscall" "golang.org/x/sys/unix" - "k8s.io/klog" + "k8s.io/utils/mount" utilpath "k8s.io/utils/path" - - "k8s.io/kubernetes/pkg/util/mount" ) const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go index a5e9081f919..303da14965c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go @@ -22,7 +22,7 @@ import ( "errors" "os" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) // HostUtil is an HostUtils implementation that allows compilation on diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go index bb74e939e72..d8d05365f1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go @@ -26,9 +26,8 @@ import ( "strings" "k8s.io/klog" + "k8s.io/utils/mount" utilpath "k8s.io/utils/path" - - "k8s.io/kubernetes/pkg/util/mount" ) // HostUtil implements HostUtils for Windows platforms. 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD index 6204312d371..f9bb907bbab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD @@ -17,9 +17,7 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/events:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/csi:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/hostutil:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", @@ -30,13 +28,13 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/errors:go_default_library", "//staging/src/k8s.io/csi-translation-lib:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -48,10 +46,8 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/features:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/awsebs:go_default_library", - "//pkg/volume/csi:go_default_library", "//pkg/volume/csi/testing:go_default_library", "//pkg/volume/gcepd:go_default_library", "//pkg/volume/testing:go_default_library", @@ -61,12 +57,10 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//staging/src/k8s.io/component-base/featuregate:go_default_library", - "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", + "//staging/src/k8s.io/csi-translation-lib:go_default_library", "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", "//vendor/github.com/prometheus/client_model/go:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/fakegenerator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/fakegenerator.go index 69c1ab693cc..38d75763cd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/fakegenerator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/fakegenerator.go @@ -21,6 +21,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + csitrans "k8s.io/csi-translation-lib" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/hostutil" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ 
-88,6 +89,10 @@ func (f *fakeOGCounter) GetVolumePluginMgr() *volume.VolumePluginMgr { return nil } +func (f *fakeOGCounter) GetCSITranslator() InTreeToCSITranslator { + return csitrans.New() +} + func (f *fakeOGCounter) GenerateBulkVolumeVerifyFunc( map[types.NodeName][]*volume.Spec, string, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index b61d3d763be..c95029e6fd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -25,13 +25,12 @@ import ( "time" "k8s.io/klog" + "k8s.io/utils/mount" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/csi" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" @@ -640,45 +639,25 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( continue } - // Migration: Must also check the Node since Attach would have been done with in-tree if node is not using Migration - nu, err := nodeUsingCSIPlugin(oe.operationGenerator, volumeAttached.VolumeSpec, node) + volumePlugin, err := + oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.NodeUsingCSIPlugin failed", err).Error()) + klog.Errorf( + "VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v", + volumeAttached.VolumeName, + volumeAttached.VolumeSpec.Name(), + volumeAttached.NodeName, + err) continue } - - var volumePlugin volume.VolumePlugin - if useCSIPlugin(oe.operationGenerator.GetVolumePluginMgr(), volumeAttached.VolumeSpec) && nu { - // The volume represented by this spec is CSI and thus should be migrated - volumePlugin, err = oe.operationGenerator.GetVolumePluginMgr().FindPluginByName(csi.CSIPluginName) - if err != nil || volumePlugin == nil { - klog.Errorf( - "VolumesAreAttached.Name failed for volume %q (spec.Name: %q) on node %q with error: %v", - volumeAttached.VolumeName, - volumeAttached.VolumeSpec.Name(), - volumeAttached.NodeName, - err) - continue - } - - csiSpec, err := translateSpec(volumeAttached.VolumeSpec) - if err != nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.TranslateSpec failed", err).Error()) - continue - } - volumeAttached.VolumeSpec = csiSpec - } else { - volumePlugin, err = - oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec) - if err != nil || volumePlugin == nil { - klog.Errorf( - "VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v", - volumeAttached.VolumeName, - volumeAttached.VolumeSpec.Name(), - volumeAttached.NodeName, - err) - continue - } + if volumePlugin == nil { + // should never happen since FindPluginBySpec always returns error if volumePlugin = nil + klog.Errorf( + "Failed to find volume plugin for volume %q (spec.Name: %q) on node %q", + volumeAttached.VolumeName, + volumeAttached.VolumeSpec.Name(), + volumeAttached.NodeName) + continue } pluginName := volumePlugin.GetPluginName() @@ -711,9 +690,8 @@ func (oe 
*operationExecutor) VerifyVolumesAreAttached( // If node doesn't support Bulk volume polling it is best to poll individually nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld) if nodeError != nil { - klog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError) + klog.Errorf("VerifyVolumesAreAttached failed for volumes %v, node %q with error %v", nodeAttachedVolumes, node, nodeError) } - break } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index ff864df2565..91b86de990d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -27,18 +27,17 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" volerr "k8s.io/cloud-provider/volume/errors" - csilib "k8s.io/csi-translation-lib" + csitrans "k8s.io/csi-translation-lib" "k8s.io/klog" "k8s.io/kubernetes/pkg/features" kevents "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/csi" "k8s.io/kubernetes/pkg/volume/util" + ioutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" @@ -49,6 +48,18 @@ const ( unknownAttachableVolumePlugin string = "UnknownAttachableVolumePlugin" ) +// InTreeToCSITranslator contains methods required to check migratable status +// and perform translations from InTree PVs and Inline to CSI +type InTreeToCSITranslator interface { + IsPVMigratable(pv *v1.PersistentVolume) bool + IsInlineMigratable(vol *v1.Volume) bool + IsMigratableIntreePluginByName(inTreePluginName string) bool + GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) + GetCSINameFromInTreeName(pluginName string) (string, error) + TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) + TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) +} + var _ OperationGenerator = &operationGenerator{} type operationGenerator struct { @@ -70,6 +81,8 @@ type operationGenerator struct { // blkUtil provides volume path related operations for block volume blkUtil volumepathhandler.BlockVolumePathHandler + + translator InTreeToCSITranslator } // NewOperationGenerator is returns instance of operationGenerator @@ -85,6 +98,7 @@ func NewOperationGenerator(kubeClient clientset.Interface, recorder: recorder, checkNodeCapabilitiesBeforeMount: checkNodeCapabilitiesBeforeMount, blkUtil: blkUtil, + translator: csitrans.New(), } } @@ -123,6 +137,9 @@ type OperationGenerator interface { // GetVolumePluginMgr returns volume plugin manager GetVolumePluginMgr() *volume.VolumePluginMgr + // GetCSITranslator returns the CSI Translation Library + GetCSITranslator() InTreeToCSITranslator + GenerateBulkVolumeVerifyFunc( map[types.NodeName][]*volume.Spec, string, @@ -151,38 +168,12 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( 
klog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName) continue } - - // Migration: Must also check the Node since Attach would have been done with in-tree if node is not using Migration - nu, err := nodeUsingCSIPlugin(og, volumeAttached.VolumeSpec, nodeName) - if err != nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.NodeUsingCSIPlugin failed", err).Error()) + volumePlugin, err := + og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec) + if err != nil || volumePlugin == nil { + klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) continue } - - var volumePlugin volume.VolumePlugin - if useCSIPlugin(og.volumePluginMgr, volumeAttached.VolumeSpec) && nu { - // The volume represented by this spec is CSI and thus should be migrated - volumePlugin, err = og.volumePluginMgr.FindPluginByName(csi.CSIPluginName) - if err != nil || volumePlugin == nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginByName failed", err).Error()) - continue - } - - csiSpec, err := translateSpec(volumeAttached.VolumeSpec) - if err != nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.TranslateSpec failed", err).Error()) - continue - } - volumeAttached.VolumeSpec = csiSpec - } else { - volumePlugin, err = - og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec) - if err != nil || volumePlugin == nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) - continue - } - } - volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()] if !pluginExists { volumeSpecList = []*volume.Spec{} @@ -326,34 +317,12 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( func (og *operationGenerator) GenerateAttachVolumeFunc( volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { - originalSpec := volumeToAttach.VolumeSpec - attachVolumeFunc := func() (error, error) { - var attachableVolumePlugin volume.AttachableVolumePlugin - nu, err := nodeUsingCSIPlugin(og, volumeToAttach.VolumeSpec, volumeToAttach.NodeName) - if err != nil { - return volumeToAttach.GenerateError("AttachVolume.NodeUsingCSIPlugin failed", err) - } - - // useCSIPlugin will check both CSIMigration and the plugin specific feature gates - if useCSIPlugin(og.volumePluginMgr, volumeToAttach.VolumeSpec) && nu { - // The volume represented by this spec is CSI and thus should be migrated - attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(csi.CSIPluginName) - if err != nil || attachableVolumePlugin == nil { - return volumeToAttach.GenerateError("AttachVolume.FindAttachablePluginByName failed", err) - } - - csiSpec, err := translateSpec(volumeToAttach.VolumeSpec) - if err != nil { - return volumeToAttach.GenerateError("AttachVolume.TranslateSpec failed", err) - } - volumeToAttach.VolumeSpec = csiSpec - } else { - attachableVolumePlugin, err = - og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) - if err != nil || attachableVolumePlugin == nil { - return volumeToAttach.GenerateError("AttachVolume.FindAttachablePluginBySpec failed", err) - } + attachVolumeFunc := func() (error, error) { + attachableVolumePlugin, err := + og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) + if err != nil || attachableVolumePlugin == nil { + return 
volumeToAttach.GenerateError("AttachVolume.FindAttachablePluginBySpec failed", err) } volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() @@ -372,7 +341,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } addErr := actualStateOfWorld.MarkVolumeAsUncertain( v1.UniqueVolumeName(""), - originalSpec, + volumeToAttach.VolumeSpec, uncertainNode) if addErr != nil { klog.Errorf("AttachVolume.MarkVolumeAsUncertain fail to add the volume %q to actual state with %s", volumeToAttach.VolumeName, addErr) @@ -391,7 +360,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - v1.UniqueVolumeName(""), originalSpec, volumeToAttach.NodeName, devicePath) + v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. return volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) @@ -409,30 +378,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } attachableVolumePluginName := unknownAttachableVolumePlugin - // TODO(dyzz) Ignoring this error means that if the plugin is migrated and - // any transient error is encountered (API unavailable, driver not installed) - // the operation will have it's metric registered with the in-tree plugin instead - // of the CSI Driver we migrated to. Fixing this requires a larger refactor that - // involves determining the plugin_name for the metric generating "CompleteFunc" - // during the actual "OperationFunc" and not during this generation function - - nu, err := nodeUsingCSIPlugin(og, volumeToAttach.VolumeSpec, volumeToAttach.NodeName) - if err != nil { - klog.Errorf("GenerateAttachVolumeFunc failed to check if node is using CSI Plugin, metric for this operation may be inaccurate: %v", err) - } - // Need to translate the spec here if the plugin is migrated so that the metrics - // emitted show the correct (migrated) plugin - if useCSIPlugin(og.volumePluginMgr, volumeToAttach.VolumeSpec) && nu { - csiSpec, err := translateSpec(volumeToAttach.VolumeSpec) - if err == nil { - volumeToAttach.VolumeSpec = csiSpec - } - // If we have an error here we ignore it, the metric emitted will then be for the - // in-tree plugin. This error case(skipped one) will also trigger an error - // while the generated function is executed. And those errors will be handled during the execution of the generated - // function with a back off policy. 
- } // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) @@ -456,6 +402,10 @@ func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { return og.volumePluginMgr } +func (og *operationGenerator) GetCSITranslator() InTreeToCSITranslator { + return og.translator +} + func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach AttachedVolume, verifySafeToDetach bool, @@ -466,32 +416,10 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( var err error if volumeToDetach.VolumeSpec != nil { - // Get attacher plugin - nu, err := nodeUsingCSIPlugin(og, volumeToDetach.VolumeSpec, volumeToDetach.NodeName) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.NodeUsingCSIPlugin failed", err) - } - - // useCSIPlugin will check both CSIMigration and the plugin specific feature gate - if useCSIPlugin(og.volumePluginMgr, volumeToDetach.VolumeSpec) && nu { - // The volume represented by this spec is CSI and thus should be migrated - attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(csi.CSIPluginName) - if err != nil || attachableVolumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) - } - - csiSpec, err := translateSpec(volumeToDetach.VolumeSpec) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.TranslateSpec failed", err) - } - - volumeToDetach.VolumeSpec = csiSpec - } else { - attachableVolumePlugin, err = - og.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec) - if err != nil || attachableVolumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) - } + attachableVolumePlugin, err = + og.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec) + if err != nil || attachableVolumePlugin == nil { + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } volumeName, err = @@ -508,25 +436,9 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } - // TODO(dyzz): This case can't distinguish between PV and In-line which is necessary because - // if it was PV it may have been migrated, but the same plugin with in-line may not have been. - // Suggestions welcome... - if csilib.IsMigratableIntreePluginByName(pluginName) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { - // The volume represented by this spec is CSI and thus should be migrated - attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(csi.CSIPluginName) - if err != nil || attachableVolumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) - } - // volumeToDetach.VolumeName here is always the in-tree volume name - // therefore a workaround is required. volumeToDetach.DevicePath - // is the attachID which happens to be what volumeName is needed for in Detach. - // Therefore we set volumeName to the attachID. And CSI Detach can detect and use that. 
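The operationexecutor hunks above replace the scattered useCSIPlugin/nodeUsingCSIPlugin branching with a single InTreeToCSITranslator dependency, initialized with csitrans.New() and exposed through GetCSITranslator(). The interface methods listed in the patch are the ones k8s.io/csi-translation-lib provides, so a caller can probe and translate a spec roughly like this sketch (the GCE PD volume is only an example input, not taken from the patch):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	csitrans "k8s.io/csi-translation-lib"
)

func main() {
	// csitrans.New() satisfies the InTreeToCSITranslator interface added in
	// operation_generator.go above.
	translator := csitrans.New()

	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "example-disk"},
			},
		},
	}

	if translator.IsPVMigratable(pv) {
		csiPV, err := translator.TranslateInTreePVToCSI(pv)
		if err != nil {
			fmt.Println("translation failed:", err)
			return
		}
		fmt.Println("translated to CSI driver:", csiPV.Spec.CSI.Driver)
	} else {
		fmt.Println("not a migratable in-tree volume")
	}
}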
- volumeName = volumeToDetach.DevicePath - } else { - attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) - if err != nil || attachableVolumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginByName failed", err) - } + attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) + if err != nil || attachableVolumePlugin == nil { + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginByName failed", err) } } @@ -577,21 +489,8 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations { - // Get mounter plugin - originalSpec := volumeToMount.VolumeSpec + volumePluginName := unknownVolumePlugin - // Need to translate the spec here if the plugin is migrated so that the metrics - // emitted show the correct (migrated) plugin - if useCSIPlugin(og.volumePluginMgr, volumeToMount.VolumeSpec) { - csiSpec, err := translateSpec(volumeToMount.VolumeSpec) - if err == nil { - volumeToMount.VolumeSpec = csiSpec - } - // If we have an error here we ignore it, the metric emitted will then be for the - // in-tree plugin. This error case(skipped one) will also trigger an error - // while the generated function is executed. And those errors will be handled during the execution of the generated - // function with a back off policy. - } volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err == nil && volumePlugin != nil { @@ -599,16 +498,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } mountVolumeFunc := func() (error, error) { - // Get mounter plugin - if useCSIPlugin(og.volumePluginMgr, volumeToMount.VolumeSpec) { - csiSpec, err := translateSpec(volumeToMount.VolumeSpec) - if err != nil { - return volumeToMount.GenerateError("MountVolume.TranslateSpec failed", err) - } - volumeToMount.VolumeSpec = csiSpec - } - volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err != nil || volumePlugin == nil { return volumeToMount.GenerateError("MountVolume.FindPluginBySpec failed", err) @@ -766,7 +656,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( nil, volumeToMount.OuterVolumeSpecName, volumeToMount.VolumeGidValue, - originalSpec) + volumeToMount.VolumeSpec) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. 
return volumeToMount.GenerateError("MountVolume.MarkVolumeAsMounted failed", markVolMountedErr) @@ -793,16 +683,8 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) { - - var pluginName string - if volumeToUnmount.VolumeSpec != nil && useCSIPlugin(og.volumePluginMgr, volumeToUnmount.VolumeSpec) { - pluginName = csi.CSIPluginName - } else { - pluginName = volumeToUnmount.PluginName - } - // Get mountable plugin - volumePlugin, err := og.volumePluginMgr.FindPluginByName(pluginName) + volumePlugin, err := og.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName) if err != nil || volumePlugin == nil { return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) } @@ -861,22 +743,9 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, hostutil hostutil.HostUtils) (volumetypes.GeneratedOperations, error) { - - var pluginName string - if useCSIPlugin(og.volumePluginMgr, deviceToDetach.VolumeSpec) { - pluginName = csi.CSIPluginName - csiSpec, err := translateSpec(deviceToDetach.VolumeSpec) - if err != nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.TranslateSpec failed", err) - } - deviceToDetach.VolumeSpec = csiSpec - } else { - pluginName = deviceToDetach.PluginName - } - // Get DeviceMounter plugin deviceMountableVolumePlugin, err := - og.volumePluginMgr.FindDeviceMountablePluginByName(pluginName) + og.volumePluginMgr.FindDeviceMountablePluginByName(deviceToDetach.PluginName) if err != nil || deviceMountableVolumePlugin == nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindDeviceMountablePluginByName failed", err) } @@ -963,17 +832,6 @@ func (og *operationGenerator) GenerateMapVolumeFunc( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { - originalSpec := volumeToMount.VolumeSpec - // Translate to CSI spec if migration enabled - if useCSIPlugin(og.volumePluginMgr, originalSpec) { - csiSpec, err := translateSpec(originalSpec) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MapVolume.TranslateSpec failed", err) - } - - volumeToMount.VolumeSpec = csiSpec - } - // Get block volume mapper plugin blockVolumePlugin, err := og.volumePluginMgr.FindMapperPluginBySpec(volumeToMount.VolumeSpec) @@ -1016,7 +874,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateError("MapVolume.GetDeviceMountPath failed", err) + return volumeToMount.GenerateError("MapVolume.GetGlobalMapPath failed", err) } if volumeAttacher != nil { // Wait for attachable volumes to finish attaching @@ -1032,20 +890,41 @@ func (og *operationGenerator) GenerateMapVolumeFunc( klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) } - // A plugin doesn't have attacher also needs to map device to global map path with SetUpDevice() - pluginDevicePath, mapErr := blockVolumeMapper.SetUpDevice() - if mapErr != nil { - // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateError("MapVolume.SetUp failed", mapErr) + // Call SetUpDevice if blockVolumeMapper implements CustomBlockVolumeMapper + if customBlockVolumeMapper, ok := blockVolumeMapper.(volume.CustomBlockVolumeMapper); ok { + mapErr := customBlockVolumeMapper.SetUpDevice() + if mapErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateError("MapVolume.SetUpDevice failed", mapErr) + } } - // if pluginDevicePath is provided, assume attacher may not provide device - // or attachment flow uses SetupDevice to get device path - if len(pluginDevicePath) != 0 { - devicePath = pluginDevicePath + // Update actual state of world to reflect volume is globally mounted + markedDevicePath := devicePath + markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted( + volumeToMount.VolumeName, markedDevicePath, globalMapPath) + if markDeviceMappedErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) } - if len(devicePath) == 0 { - return volumeToMount.GenerateError("MapVolume failed", goerrors.New("Device path of the volume is empty")) + + // Call MapPodDevice if blockVolumeMapper implements CustomBlockVolumeMapper + if customBlockVolumeMapper, ok := blockVolumeMapper.(volume.CustomBlockVolumeMapper); ok { + // Execute driver specific map + pluginDevicePath, mapErr := customBlockVolumeMapper.MapPodDevice() + if mapErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateError("MapVolume.MapPodDevice failed", mapErr) + } + + // if pluginDevicePath is provided, assume attacher may not provide device + // or attachment flow uses SetupDevice to get device path + if len(pluginDevicePath) != 0 { + devicePath = pluginDevicePath + } + if len(devicePath) == 0 { + return volumeToMount.GenerateError("MapVolume failed", goerrors.New("Device path of the volume is empty")) + } } // When kubelet is containerized, devicePath may be a symlink at a place unavailable to @@ -1062,39 +941,33 @@ func (og *operationGenerator) GenerateMapVolumeFunc( return volumeToMount.GenerateError("MapVolume.EvalHostSymlinks failed", err) } - // Map device to global and pod device map path - volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath() - mapErr = blockVolumeMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID) - if mapErr != nil { - // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) - } - - // Take filedescriptor lock to keep a block device opened. Otherwise, there is a case - // that the block device is silently removed and attached another device with same name. - // Container runtime can't handler this problem. To avoid unexpected condition fd lock - // for the block device is required. - _, err = og.blkUtil.AttachFileDevice(devicePath) - if err != nil { - return volumeToMount.GenerateError("MapVolume.AttachFileDevice failed", err) + // Update actual state of world with the devicePath again, if devicePath has changed from markedDevicePath + // TODO: This can be improved after #82492 is merged and ASW has state. + if markedDevicePath != devicePath { + markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted( + volumeToMount.VolumeName, devicePath, globalMapPath) + if markDeviceMappedErr != nil { + // On failure, return error. Caller will log and retry. 
+ return volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) + } } - // Update actual state of world to reflect volume is globally mounted - markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted( - volumeToMount.VolumeName, devicePath, globalMapPath) - if markDeviceMappedErr != nil { + // Execute common map + volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath() + mapErr := ioutil.MapBlockVolume(og.blkUtil, devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID) + if mapErr != nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) + return volumeToMount.GenerateError("MapVolume.MapBlockVolume failed", mapErr) } // Device mapping for global map path succeeded - simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) + simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapPodDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) verbosity := klog.Level(4) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) klog.V(verbosity).Infof(detailedMsg) // Device mapping for pod device map path succeeded - simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath)) + simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapPodDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath)) verbosity = klog.Level(1) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) klog.V(verbosity).Infof(detailedMsg) @@ -1118,7 +991,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( blockVolumeMapper, volumeToMount.OuterVolumeSpecName, volumeToMount.VolumeGidValue, - originalSpec) + volumeToMount.VolumeSpec) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. 
return volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr) @@ -1149,32 +1022,12 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { - var blockVolumePlugin volume.BlockVolumePlugin - var err error - // Translate to CSI spec if migration enabled - // And get block volume unmapper plugin - if volumeToUnmount.VolumeSpec != nil && useCSIPlugin(og.volumePluginMgr, volumeToUnmount.VolumeSpec) { - csiSpec, err := translateSpec(volumeToUnmount.VolumeSpec) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.TranslateSpec failed", err) - } - - volumeToUnmount.VolumeSpec = csiSpec - - blockVolumePlugin, err = - og.volumePluginMgr.FindMapperPluginByName(csi.CSIPluginName) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) - } - } else { - blockVolumePlugin, err = - og.volumePluginMgr.FindMapperPluginByName(volumeToUnmount.PluginName) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) - } + // Get block volume unmapper plugin + blockVolumePlugin, err := + og.volumePluginMgr.FindMapperPluginByName(volumeToUnmount.PluginName) + if err != nil { + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) } - - var blockVolumeUnmapper volume.BlockVolumeUnmapper if blockVolumePlugin == nil { return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) } @@ -1185,21 +1038,26 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( } unmapVolumeFunc := func() (error, error) { - // Try to unmap volumeName symlink under pod device map path dir // pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName} podDeviceUnmapPath, volName := blockVolumeUnmapper.GetPodDeviceMapPath() - unmapDeviceErr := og.blkUtil.UnmapDevice(podDeviceUnmapPath, volName) - if unmapDeviceErr != nil { - // On failure, return error. Caller will log and retry. - return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on pod device map path failed", unmapDeviceErr) - } - // Try to unmap podUID symlink under global map path dir // plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID} globalUnmapPath := volumeToUnmount.DeviceMountPath - unmapDeviceErr = og.blkUtil.UnmapDevice(globalUnmapPath, string(volumeToUnmount.PodUID)) - if unmapDeviceErr != nil { + + // Execute common unmap + unmapErr := ioutil.UnmapBlockVolume(og.blkUtil, globalUnmapPath, podDeviceUnmapPath, volName, volumeToUnmount.PodUID) + if unmapErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) + return volumeToUnmount.GenerateError("UnmapVolume.UnmapBlockVolume failed", unmapErr) + } + + // Call UnmapPodDevice if blockVolumeUnmapper implements CustomBlockVolumeUnmapper + if customBlockVolumeUnmapper, ok := blockVolumeUnmapper.(volume.CustomBlockVolumeUnmapper); ok { + // Execute plugin specific unmap + unmapErr = customBlockVolumeUnmapper.UnmapPodDevice() + if unmapErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToUnmount.GenerateError("UnmapVolume.UnmapPodDevice failed", unmapErr) + } } klog.Infof( @@ -1246,27 +1104,10 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( actualStateOfWorld ActualStateOfWorldMounterUpdater, hostutil hostutil.HostUtils) (volumetypes.GeneratedOperations, error) { - var blockVolumePlugin volume.BlockVolumePlugin - var err error - // Translate to CSI spec if migration enabled - if useCSIPlugin(og.volumePluginMgr, deviceToDetach.VolumeSpec) { - csiSpec, err := translateSpec(deviceToDetach.VolumeSpec) - if err != nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.TranslateSpec failed", err) - } - - deviceToDetach.VolumeSpec = csiSpec - blockVolumePlugin, err = - og.volumePluginMgr.FindMapperPluginByName(csi.CSIPluginName) - if err != nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed", err) - } - } else { - blockVolumePlugin, err = - og.volumePluginMgr.FindMapperPluginByName(deviceToDetach.PluginName) - if err != nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed", err) - } + blockVolumePlugin, err := + og.volumePluginMgr.FindMapperPluginByName(deviceToDetach.PluginName) + if err != nil { + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed", err) } if blockVolumePlugin == nil { @@ -1284,43 +1125,25 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( // Search under globalMapPath dir if all symbolic links from pods have been removed already. // If symbolic links are there, pods may still refer the volume. globalMapPath := deviceToDetach.DeviceMountPath - refs, err := og.blkUtil.GetDeviceSymlinkRefs(deviceToDetach.DevicePath, globalMapPath) + refs, err := og.blkUtil.GetDeviceBindMountRefs(deviceToDetach.DevicePath, globalMapPath) if err != nil { - return deviceToDetach.GenerateError("UnmapDevice.GetDeviceSymlinkRefs check failed", err) + return deviceToDetach.GenerateError("UnmapDevice.GetDeviceBindMountRefs check failed", err) } if len(refs) > 0 { err = fmt.Errorf("The device %q is still referenced from other Pods %v", globalMapPath, refs) return deviceToDetach.GenerateError("UnmapDevice failed", err) } - // The block volume is not referenced from Pods. Release file descriptor lock. - // This should be done before calling TearDownDevice, because some plugins that do local detach - // in TearDownDevice will fail in detaching device due to the refcnt on the loopback device. 
- klog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) - loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath) - if err != nil { - if err.Error() == volumepathhandler.ErrDeviceNotFound { - klog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) - } else { - errInfo := "UnmapDevice.GetLoopDevice failed to get loopback device, " + fmt.Sprintf("device path: %q", deviceToDetach.DevicePath) - return deviceToDetach.GenerateError(errInfo, err) - } - } else { - if len(loopPath) != 0 { - err = og.blkUtil.RemoveLoopDevice(loopPath) - if err != nil { - return deviceToDetach.GenerateError("UnmapDevice.RemoveLoopDevice failed", err) - } + // Call TearDownDevice if blockVolumeUnmapper implements CustomBlockVolumeUnmapper + if customBlockVolumeUnmapper, ok := blockVolumeUnmapper.(volume.CustomBlockVolumeUnmapper); ok { + // Execute tear down device + unmapErr := customBlockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) + if unmapErr != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr) } } - // Execute tear down device - unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) - if unmapErr != nil { - // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr) - } - // Plugin finished TearDownDevice(). Now globalMapPath dir and plugin's stored data // on the dir are unnecessary, clean up it. removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath) @@ -1561,22 +1384,13 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { - fsResizeFunc := func() (error, error) { - // Need to translate the spec here if the plugin is migrated so that the metrics - // emitted show the correct (migrated) plugin - if useCSIPlugin(og.volumePluginMgr, volumeToMount.VolumeSpec) { - csiSpec, err := translateSpec(volumeToMount.VolumeSpec) - if err != nil { - return volumeToMount.GenerateError("NodeExpandVolume.translateSpec failed", err) - } - volumeToMount.VolumeSpec = csiSpec - } - volumePlugin, err := - og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) - if err != nil || volumePlugin == nil { - return volumeToMount.GenerateError("NodeExpandVolume.FindPluginBySpec failed", err) - } + volumePlugin, err := + og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) + if err != nil || volumePlugin == nil { + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("NodeExpandVolume.FindPluginBySpec failed", err) + } + fsResizeFunc := func() (error, error) { var resizeDone bool var simpleErr, detailedErr error resizeOptions := volume.NodeResizeOptions{ @@ -1634,24 +1448,6 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc( } } - // Need to translate the spec here if the plugin is migrated so that the metrics - // emitted show the correct (migrated) plugin - if useCSIPlugin(og.volumePluginMgr, volumeToMount.VolumeSpec) { - csiSpec, err := translateSpec(volumeToMount.VolumeSpec) - if err == nil { - volumeToMount.VolumeSpec = csiSpec - } - // If we have an error here we ignore it, the metric emitted will then be for the - // in-tree plugin. 
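The UnmapDevice hunk above drops the generator-managed loop-device cleanup and instead delegates teardown through a type assertion: plugin-specific work only runs when the unmapper implements CustomBlockVolumeUnmapper. Below is a minimal sketch of that optional-interface pattern using stand-in types rather than the upstream volume package.

```go
// Minimal sketch of the optional-interface pattern: device teardown runs only
// when the unmapper opts in by implementing a CustomBlockVolumeUnmapper-style
// interface. Types here are stand-ins, not the upstream volume package.
package main

import "fmt"

type blockVolumeUnmapper interface{} // stand-in for volume.BlockVolumeUnmapper

type customBlockVolumeUnmapper interface {
	TearDownDevice(mapPath, devicePath string) error
}

type csiUnmapper struct{} // hypothetical plugin that needs device teardown

func (csiUnmapper) TearDownDevice(mapPath, devicePath string) error {
	fmt.Printf("tearing down %s (device %s)\n", mapPath, devicePath)
	return nil
}

func tearDownIfSupported(u blockVolumeUnmapper, mapPath, devicePath string) error {
	if custom, ok := u.(customBlockVolumeUnmapper); ok {
		return custom.TearDownDevice(mapPath, devicePath)
	}
	// Plugins without plugin-specific device teardown simply skip this step.
	return nil
}

func main() {
	_ = tearDownIfSupported(csiUnmapper{}, "/var/lib/kubelet/plugins/example/volumeDevices/vol-1", "/dev/sdX")
	_ = tearDownIfSupported(struct{}{}, "/ignored", "/ignored") // no-op for plain unmappers
}
```

The same assertion shape guards SetUpDevice and MapPodDevice in the GenerateMapVolumeFunc hunk earlier in this patch, and UnmapPodDevice in GenerateUnmapVolumeFunc.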
This error case(skipped one) will also trigger an error - // while the generated function is executed. And those errors will be handled during the execution of the generated - // function with a back off policy. - } - volumePlugin, err := - og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) - if err != nil || volumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("NodeExpandVolume.FindPluginBySpec failed", err) - } - return volumetypes.GeneratedOperations{ OperationName: "volume_fs_resize", OperationFunc: fsResizeFunc, @@ -1718,6 +1514,7 @@ func (og *operationGenerator) nodeExpandVolume(volumeToMount VolumeToMount, rsOp simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system") klog.Warningf(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) + og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) return true, nil } rsOpts.VolumeSpec = volumeToMount.VolumeSpec @@ -1735,6 +1532,7 @@ func (og *operationGenerator) nodeExpandVolume(volumeToMount VolumeToMount, rsOp } simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume succeeded", "") og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) + og.recorder.Eventf(pvc, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) klog.Infof(detailedMsg) // File system resize succeeded, now update the PVC's Capacity to match the PV's err = util.MarkFSResizeFinished(pvc, pvSpecCap, og.kubeClient) @@ -1760,10 +1558,6 @@ func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount // checkNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels // This ensures that we don't mount a volume that doesn't belong to this node func checkNodeAffinity(og *operationGenerator, volumeToMount VolumeToMount) error { - if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { - return nil - } - pv := volumeToMount.VolumeSpec.PersistentVolume if pv != nil { nodeLabels, err := og.volumePluginMgr.Host.GetNodeLabels() @@ -1799,136 +1593,3 @@ func isDeviceOpened(deviceToDetach AttachedVolume, hostUtil hostutil.HostUtils) } return deviceOpened, nil } - -func useCSIPlugin(vpm *volume.VolumePluginMgr, spec *volume.Spec) bool { - // TODO(#75146) Check whether the driver is installed as well so that - // we can throw a better error when the driver is not installed. 
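With the PersistentLocalVolumes gate check removed above, checkNodeAffinity always compares the PV's node affinity against the node's labels before a mount proceeds. The toy example below shows the kind of label matching involved, assuming simplified local types and only the "In" operator; the real code walks the PV's NodeAffinity through the core/v1 helpers.

```go
// Toy illustration of node-affinity matching with simplified types.
package main

import "fmt"

// requirement is a stand-in for a single "In" node-selector requirement.
type requirement struct {
	key    string
	values []string
}

// termMatches reports whether every requirement is satisfied by the node labels.
func termMatches(term []requirement, nodeLabels map[string]string) bool {
	for _, req := range term {
		have, ok := nodeLabels[req.key]
		if !ok {
			return false
		}
		matched := false
		for _, want := range req.values {
			if have == want {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	node := map[string]string{"kubernetes.io/hostname": "node-a"}
	term := []requirement{{key: "kubernetes.io/hostname", values: []string{"node-a"}}}
	fmt.Println(termMatches(term, node)) // true: this volume belongs to node-a
}
```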
- // The error should be of the approximate form: - // fmt.Errorf("in-tree plugin %s is migrated on node %s but driver %s is not installed", pluginName, string(nodeName), driverName) - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { - return false - } - if csilib.IsPVMigratable(spec.PersistentVolume) || csilib.IsInlineMigratable(spec.Volume) { - migratable, err := vpm.IsPluginMigratableBySpec(spec) - if err == nil && migratable { - return true - } - } - return false -} - -func nodeUsingCSIPlugin(og OperationGenerator, spec *volume.Spec, nodeName types.NodeName) (bool, error) { - vpm := og.GetVolumePluginMgr() - - migratable, err := vpm.IsPluginMigratableBySpec(spec) - if err != nil { - return false, err - } - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) || - !utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) || - !migratable { - return false, nil - } - - if len(nodeName) == 0 { - return false, goerrors.New("nodeName is empty") - } - - kubeClient := vpm.Host.GetKubeClient() - if kubeClient == nil { - // Don't handle the controller/kubelet version skew check and fallback - // to just checking the feature gates. This can happen if - // we are in a standalone (headless) Kubelet - return true, nil - } - - adcHost, ok := vpm.Host.(volume.AttachDetachVolumeHost) - if !ok { - // Don't handle the controller/kubelet version skew check and fallback - // to just checking the feature gates. This can happen if - // "enableControllerAttachDetach" is set to true on kubelet - return true, nil - } - - if adcHost.CSINodeLister() == nil { - return false, goerrors.New("could not find CSINodeLister in attachDetachController") - } - - csiNode, err := adcHost.CSINodeLister().Get(string(nodeName)) - if err != nil { - return false, err - } - - ann := csiNode.GetAnnotations() - if ann == nil { - return false, nil - } - - var mpaSet sets.String - mpa := ann[v1.MigratedPluginsAnnotationKey] - tok := strings.Split(mpa, ",") - if len(mpa) == 0 { - mpaSet = sets.NewString() - } else { - mpaSet = sets.NewString(tok...) 
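The removed nodeUsingCSIPlugin logic above decided, per node, whether an in-tree plugin had been migrated by reading a comma-separated CSINode annotation into a string set. A toy reconstruction of just that parsing step follows; the annotation value and plugin names are illustrative examples, not taken from this patch.

```go
// Toy sketch of turning a comma-separated migrated-plugins annotation into a
// set and querying it. Values are examples only.
package main

import (
	"fmt"
	"strings"
)

func migratedPlugins(annotation string) map[string]bool {
	set := map[string]bool{}
	if annotation == "" {
		return set
	}
	for _, name := range strings.Split(annotation, ",") {
		set[name] = true
	}
	return set
}

func main() {
	ann := "kubernetes.io/gce-pd,kubernetes.io/aws-ebs" // example annotation value
	set := migratedPlugins(ann)
	fmt.Println(set["kubernetes.io/gce-pd"]) // true: treat this volume as migrated on the node
	fmt.Println(set["kubernetes.io/cinder"]) // false: keep using the in-tree plugin
}
```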
- } - - pluginName, err := csilib.GetInTreePluginNameFromSpec(spec.PersistentVolume, spec.Volume) - if err != nil { - return false, err - } - - if len(pluginName) == 0 { - // Could not find a plugin name from translation directory, assume not translated - return false, nil - } - - isMigratedOnNode := mpaSet.Has(pluginName) - - if isMigratedOnNode { - installed := false - driverName, err := csilib.GetCSINameFromInTreeName(pluginName) - if err != nil { - return isMigratedOnNode, err - } - for _, driver := range csiNode.Spec.Drivers { - if driver.Name == driverName { - installed = true - break - } - } - if !installed { - return true, fmt.Errorf("in-tree plugin %s is migrated on node %s but driver %s is not installed", pluginName, string(nodeName), driverName) - } - } - - return isMigratedOnNode, nil - -} - -func translateSpec(spec *volume.Spec) (*volume.Spec, error) { - var csiPV *v1.PersistentVolume - var err error - inlineVolume := false - if spec.PersistentVolume != nil { - // TranslateInTreePVToCSI will create a new PV - csiPV, err = csilib.TranslateInTreePVToCSI(spec.PersistentVolume) - if err != nil { - return nil, fmt.Errorf("failed to translate in tree pv to CSI: %v", err) - } - } else if spec.Volume != nil { - // TranslateInTreeInlineVolumeToCSI will create a new PV - csiPV, err = csilib.TranslateInTreeInlineVolumeToCSI(spec.Volume) - if err != nil { - return nil, fmt.Errorf("failed to translate in tree inline volume to CSI: %v", err) - } - inlineVolume = true - } else { - return &volume.Spec{}, goerrors.New("not a valid volume spec") - } - return &volume.Spec{ - PersistentVolume: csiPV, - ReadOnly: spec.ReadOnly, - InlineVolumeSpecForCSIMigration: inlineVolume, - }, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index b35daba380d..0f10f1d57bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -20,6 +20,8 @@ import ( "encoding/json" "fmt" + "k8s.io/utils/mount" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -28,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/resizefs" "k8s.io/kubernetes/pkg/volume" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/BUILD index 64e11ad23d1..3830dc9ae00 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/BUILD @@ -12,49 +12,54 @@ go_library( visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:android": [ - "//pkg/util/mount:go_default_library", - "//vendor/k8s.io/utils/nsenter:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - 
"//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + "//vendor/k8s.io/utils/nsenter:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:plan9": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ - "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ - "//pkg/util/mount:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", ], "//conditions:default": [], @@ -69,9 +74,13 @@ go_test( ], embed = [":go_default_library"], deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go index b497a810dc2..7778c1af02c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go @@ -28,10 +28,8 @@ import ( "syscall" "golang.org/x/sys/unix" - "k8s.io/klog" - - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) const ( @@ -241,6 +239,12 @@ func doCleanSubPaths(mounter mount.Interface, podDir string, volumeName string) if err = doCleanSubPath(mounter, fullContainerDirPath, filepath.Base(path)); err != nil { return err } + + if info.IsDir() { + // skip subdirs of the volume: it only matters the first level to unmount, otherwise it would try to unmount subdir of the volume + return filepath.SkipDir + } + return nil }) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_unsupported.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_unsupported.go index a1fd600a286..9b937adcfef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_unsupported.go @@ -22,7 +22,7 @@ import ( "errors" "os" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" "k8s.io/utils/nsenter" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go index 2bbb3c52716..f8987397b63 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go @@ -26,7 +26,7 @@ import ( "syscall" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" "k8s.io/utils/nsenter" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index d349c128003..e3baaa7c3a6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -22,28 +22,31 @@ import ( "os" "path/filepath" "reflect" + "runtime" "strings" + "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" + apiruntime "k8s.io/apimachinery/pkg/runtime" utypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/v1/pod" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) const ( @@ -196,7 +199,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) { pod := &v1.Pod{} codec := legacyscheme.Codecs.UniversalDecoder() - if err := runtime.DecodeInto(codec, podDef, pod); err != nil { + if err := apiruntime.DecodeInto(codec, podDef, pod); err != nil { return nil, fmt.Errorf("failed decoding file: %v", err) } return pod, nil @@ -424,14 +427,6 @@ func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) { return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name()) } -// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc. -func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) { - if claim.Spec.VolumeMode != nil { - return *claim.Spec.VolumeMode, nil - } - return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name) -} - // GetPersistentVolumeClaimQualifiedName returns a qualified name for pvc. 
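Stepping back to the subpath cleanup hunk above: doCleanSubPaths now returns filepath.SkipDir for directories, so only the first level under the per-container subpath directory is processed; deeper entries are the volume's own contents rather than mounts created by kubelet. The self-contained demo below shows that walk pattern against a hypothetical temporary directory layout.

```go
// Demo of the filepath.SkipDir early-out: only first-level entries are visited.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical stand-in for .../volume-subpaths/{volumeName}/{containerName}
	root, err := os.MkdirTemp("", "subpath-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// "0" is a first-level subpath mount point; "0/data" is volume content below it.
	if err := os.MkdirAll(filepath.Join(root, "0", "data"), 0750); err != nil {
		panic(err)
	}

	_ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if path == root {
			return nil
		}
		fmt.Println("would clean subpath:", path)
		if info.IsDir() {
			// Stop descending: deeper entries are volume data, not subpath mounts.
			return filepath.SkipDir
		}
		return nil
	})
	// Only root/0 is printed; root/0/data is never visited.
}
```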
func GetPersistentVolumeClaimQualifiedName(claim *v1.PersistentVolumeClaim) string { return utilstrings.JoinQualifiedName(claim.GetNamespace(), claim.GetName()) @@ -508,30 +503,74 @@ func MakeAbsolutePath(goos, path string) string { return "c:\\" + path } -// MapBlockVolume is a utility function to provide a common way of mounting +// MapBlockVolume is a utility function to provide a common way of mapping // block device path for a specified volume and pod. This function should be // called by volume plugins that implements volume.BlockVolumeMapper.Map() method. func MapBlockVolume( + blkUtil volumepathhandler.BlockVolumePathHandler, devicePath, globalMapPath, podVolumeMapPath, volumeMapName string, podUID utypes.UID, ) error { - blkUtil := volumepathhandler.NewBlockVolumePathHandler() - - // map devicePath to global node path - mapErr := blkUtil.MapDevice(devicePath, globalMapPath, string(podUID)) + // map devicePath to global node path as bind mount + mapErr := blkUtil.MapDevice(devicePath, globalMapPath, string(podUID), true /* bindMount */) if mapErr != nil { - return mapErr + return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, globalMapPath:%s, podUID: %s, bindMount: %v: %v", + devicePath, globalMapPath, string(podUID), true, mapErr) } // map devicePath to pod volume path - mapErr = blkUtil.MapDevice(devicePath, podVolumeMapPath, volumeMapName) + mapErr = blkUtil.MapDevice(devicePath, podVolumeMapPath, volumeMapName, false /* bindMount */) + if mapErr != nil { + return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, podVolumeMapPath:%s, volumeMapName: %s, bindMount: %v: %v", + devicePath, podVolumeMapPath, volumeMapName, false, mapErr) + } + + // Take file descriptor lock to keep a block device opened. Otherwise, there is a case + // that the block device is silently removed and attached another device with the same name. + // Container runtime can't handle this problem. To avoid unexpected condition fd lock + // for the block device is required. + _, mapErr = blkUtil.AttachFileDevice(filepath.Join(globalMapPath, string(podUID))) if mapErr != nil { - return mapErr + return fmt.Errorf("blkUtil.AttachFileDevice failed. globalMapPath:%s, podUID: %s: %v", + globalMapPath, string(podUID), mapErr) + } + + return nil +} + +// UnmapBlockVolume is a utility function to provide a common way of unmapping +// block device path for a specified volume and pod. This function should be +// called by volume plugins that implements volume.BlockVolumeMapper.Map() method. +func UnmapBlockVolume( + blkUtil volumepathhandler.BlockVolumePathHandler, + globalUnmapPath, + podDeviceUnmapPath, + volumeMapName string, + podUID utypes.UID, +) error { + // Release file descriptor lock. + err := blkUtil.DetachFileDevice(filepath.Join(globalUnmapPath, string(podUID))) + if err != nil { + return fmt.Errorf("blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s: %v", + globalUnmapPath, string(podUID), err) + } + + // unmap devicePath from pod volume path + unmapDeviceErr := blkUtil.UnmapDevice(podDeviceUnmapPath, volumeMapName, false /* bindMount */) + if unmapDeviceErr != nil { + return fmt.Errorf("blkUtil.DetachFileDevice failed. podDeviceUnmapPath:%s, volumeMapName: %s, bindMount: %v: %v", + podDeviceUnmapPath, volumeMapName, false, unmapDeviceErr) } + // unmap devicePath from global node path + unmapDeviceErr = blkUtil.UnmapDevice(globalUnmapPath, string(podUID), true /* bindMount */) + if unmapDeviceErr != nil { + return fmt.Errorf("blkUtil.DetachFileDevice failed. 
globalUnmapPath:%s, podUID: %s, bindMount: %v: %v", + globalUnmapPath, string(podUID), true, unmapDeviceErr) + } return nil } @@ -590,3 +629,18 @@ func HasMountRefs(mountPath string, mountRefs []string) bool { } return false } + +//WriteVolumeCache flush disk data given the spcified mount path +func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error { + // If runtime os is windows, execute Write-VolumeCache powershell command on the disk + if runtime.GOOS == "windows" { + cmd := fmt.Sprintf("Get-Volume -FilePath %s | Write-Volumecache", deviceMountPath) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() + klog.Infof("command (%q) execeuted: %v, output: %q", cmd, err, string(output)) + if err != nil { + return fmt.Errorf("command (%q) failed: %v, output: %q", cmd, err, string(output)) + } + } + // For linux runtime, it skips because unmount will automatically flush disk data + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD index e344849a517..fdbc4b8b33e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD @@ -12,7 +12,17 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", - ], + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go index a05db3e25b5..9c0983bbf73 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -23,12 +23,15 @@ import ( "path/filepath" "k8s.io/klog" + utilexec "k8s.io/utils/exec" + "k8s.io/utils/mount" "k8s.io/apimachinery/pkg/types" ) const ( losetupPath = "losetup" + statPath = "stat" ErrDeviceNotFound = "device not found" ErrDeviceNotSupported = "device not supported" ) @@ -36,25 +39,28 @@ const ( // BlockVolumePathHandler defines a set of operations for handling block volume-related operations type BlockVolumePathHandler interface { // MapDevice creates a symbolic link to block device under specified map path - MapDevice(devicePath string, mapPath string, linkName string) error + MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error // UnmapDevice removes a symbolic link to block device under specified map path - UnmapDevice(mapPath string, linkName string) error + UnmapDevice(mapPath string, linkName string, bindMount bool) error // RemovePath removes a file or directory on specified map path RemoveMapPath(mapPath string) error // IsSymlinkExist retruns true if specified symbolic link exists IsSymlinkExist(mapPath string) (bool, error) - // GetDeviceSymlinkRefs searches symbolic links under global map path - GetDeviceSymlinkRefs(devPath 
string, mapPath string) ([]string, error) + // IsDeviceBindMountExist retruns true if specified bind mount exists + IsDeviceBindMountExist(mapPath string) (bool, error) + // GetDeviceBindMountRefs searches bind mounts under global map path + GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error) // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath // corresponding to map path symlink, and then return global map path with pod uuid. FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) // AttachFileDevice takes a path to a regular file and makes it available as an // attached block device. AttachFileDevice(path string) (string, error) + // DetachFileDevice takes a path to the attached block device and + // detach it from block device. + DetachFileDevice(path string) error // GetLoopDevice returns the full path to the loop device associated with the given path. GetLoopDevice(path string) (string, error) - // RemoveLoopDevice removes specified loopback device - RemoveLoopDevice(device string) error } // NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler. @@ -68,7 +74,7 @@ type VolumePathHandler struct { } // MapDevice creates a symbolic link to block device under specified map path -func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { +func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error { // Example of global map path: // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} // linkName: {podUid} @@ -77,13 +83,13 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName // podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} // linkName: {volumeName} if len(devicePath) == 0 { - return fmt.Errorf("Failed to map device to map path. devicePath is empty") + return fmt.Errorf("failed to map device to map path. devicePath is empty") } if len(mapPath) == 0 { - return fmt.Errorf("Failed to map device to map path. mapPath is empty") + return fmt.Errorf("failed to map device to map path. 
mapPath is empty") } if !filepath.IsAbs(mapPath) { - return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) + return fmt.Errorf("the map path should be absolute: map path: %s", mapPath) } klog.V(5).Infof("MapDevice: devicePath %s", devicePath) klog.V(5).Infof("MapDevice: mapPath %s", mapPath) @@ -92,31 +98,119 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName // Check and create mapPath _, err := os.Stat(mapPath) if err != nil && !os.IsNotExist(err) { - klog.Errorf("cannot validate map path: %s", mapPath) - return err + return fmt.Errorf("cannot validate map path: %s: %v", mapPath, err) } if err = os.MkdirAll(mapPath, 0750); err != nil { - return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) + return fmt.Errorf("failed to mkdir %s: %v", mapPath, err) } + + if bindMount { + return mapBindMountDevice(v, devicePath, mapPath, linkName) + } + return mapSymlinkDevice(v, devicePath, mapPath, linkName) +} + +func mapBindMountDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error { + // Check bind mount exists + linkPath := filepath.Join(mapPath, string(linkName)) + + file, err := os.Stat(linkPath) + if err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("failed to stat file %s: %v", linkPath, err) + } + + // Create file + newFile, err := os.OpenFile(linkPath, os.O_CREATE|os.O_RDWR, 0750) + if err != nil { + return fmt.Errorf("failed to open file %s: %v", linkPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close file %s: %v", linkPath, err) + } + } else { + // Check if device file + // TODO: Need to check if this device file is actually the expected bind mount + if file.Mode()&os.ModeDevice == os.ModeDevice { + klog.Warningf("Warning: Map skipped because bind mount already exist on the path: %v", linkPath) + return nil + } + + klog.Warningf("Warning: file %s is already exist but not mounted, skip creating file", linkPath) + } + + // Bind mount file + mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()} + if err := mounter.Mount(devicePath, linkPath, "" /* fsType */, []string{"bind"}); err != nil { + return fmt.Errorf("failed to bind mount devicePath: %s to linkPath %s: %v", devicePath, linkPath, err) + } + + return nil +} + +func mapSymlinkDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error { // Remove old symbolic link(or file) then create new one. // This should be done because current symbolic link is // stale across node reboot. linkPath := filepath.Join(mapPath, string(linkName)) - if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { - return err + if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove file %s: %v", linkPath, err) } - err = os.Symlink(devicePath, linkPath) - return err + return os.Symlink(devicePath, linkPath) } // UnmapDevice removes a symbolic link associated to block device under specified map path -func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { +func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string, bindMount bool) error { if len(mapPath) == 0 { - return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") + return fmt.Errorf("failed to unmap device from map path. 
mapPath is empty") } klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) klog.V(5).Infof("UnmapDevice: linkName %s", linkName) + if bindMount { + return unmapBindMountDevice(v, mapPath, linkName) + } + return unmapSymlinkDevice(v, mapPath, linkName) +} + +func unmapBindMountDevice(v VolumePathHandler, mapPath string, linkName string) error { + // Check bind mount exists + linkPath := filepath.Join(mapPath, string(linkName)) + if isMountExist, checkErr := v.IsDeviceBindMountExist(linkPath); checkErr != nil { + return checkErr + } else if !isMountExist { + klog.Warningf("Warning: Unmap skipped because bind mount does not exist on the path: %v", linkPath) + + // Check if linkPath still exists + if _, err := os.Stat(linkPath); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("failed to check if path %s exists: %v", linkPath, err) + } + // linkPath has already been removed + return nil + } + // Remove file + if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove file %s: %v", linkPath, err) + } + return nil + } + + // Unmount file + mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()} + if err := mounter.Unmount(linkPath); err != nil { + return fmt.Errorf("failed to unmount linkPath %s: %v", linkPath, err) + } + + // Remove file + if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove file %s: %v", linkPath, err) + } + + return nil +} + +func unmapSymlinkDevice(v VolumePathHandler, mapPath string, linkName string) error { // Check symbolic link exists linkPath := filepath.Join(mapPath, string(linkName)) if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { @@ -125,19 +219,18 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) return nil } - err := os.Remove(linkPath) - return err + return os.Remove(linkPath) } // RemoveMapPath removes a file or directory on specified map path func (v VolumePathHandler) RemoveMapPath(mapPath string) error { if len(mapPath) == 0 { - return fmt.Errorf("Failed to remove map path. mapPath is empty") + return fmt.Errorf("failed to remove map path. mapPath is empty") } klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) err := os.RemoveAll(mapPath) if err != nil && !os.IsNotExist(err) { - return err + return fmt.Errorf("failed to remove directory %s: %v", mapPath, err) } return nil } @@ -147,86 +240,59 @@ func (v VolumePathHandler) RemoveMapPath(mapPath string) error { // On other cases, return false with error from Lstat(). 
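mapBindMountDevice and unmapBindMountDevice above replace the old symlink scheme with a bind mount: an empty placeholder file is created under the map path, the block device is bind-mounted over it, and on unmap the mount is removed before the placeholder is deleted. The Linux-only sketch below mirrors that sequence directly with golang.org/x/sys/unix, whereas the patch goes through mount.SafeFormatAndMount; the paths in main are illustrative and the calls need root plus a real block device to succeed.

```go
// Linux-only sketch of bind-mount based device mapping (not the upstream helper).
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func mapBindMount(devicePath, mapPath, linkName string) error {
	linkPath := filepath.Join(mapPath, linkName)
	// Placeholder regular file that the block device is bind-mounted over.
	f, err := os.OpenFile(linkPath, os.O_CREATE|os.O_RDWR, 0750)
	if err != nil {
		return fmt.Errorf("failed to create %s: %v", linkPath, err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("failed to close %s: %v", linkPath, err)
	}
	if err := unix.Mount(devicePath, linkPath, "", unix.MS_BIND, ""); err != nil {
		return fmt.Errorf("failed to bind mount %s onto %s: %v", devicePath, linkPath, err)
	}
	return nil
}

func unmapBindMount(mapPath, linkName string) error {
	linkPath := filepath.Join(mapPath, linkName)
	if err := unix.Unmount(linkPath, 0); err != nil {
		return fmt.Errorf("failed to unmount %s: %v", linkPath, err)
	}
	// Remove the placeholder once the device is no longer mounted on it.
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove %s: %v", linkPath, err)
	}
	return nil
}

func main() {
	// Illustrative paths only; expect a permission error unless run as root.
	if err := mapBindMount("/dev/loop0", os.TempDir(), "pod-uid-1234"); err != nil {
		fmt.Println("map:", err)
		return
	}
	fmt.Println("unmap:", unmapBindMount(os.TempDir(), "pod-uid-1234"))
}
```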
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { fi, err := os.Lstat(mapPath) - if err == nil { - // If file exits and it's symbolic link, return true and no error - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - return true, nil + if err != nil { + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil } - // If file exits but it's not symbolic link, return fale and no error - return false, nil + // Return error from Lstat() + return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err) } - // If file doesn't exist, return false and no error - if os.IsNotExist(err) { - return false, nil + // If file exits and it's symbolic link, return true and no error + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return true, nil } - // Return error from Lstat() - return false, err + // If file exits but it's not symbolic link, return false and no error + return false, nil } -// GetDeviceSymlinkRefs searches symbolic links under global map path -func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { +// IsDeviceBindMountExist returns true if specified file exists and the type is device. +// If file doesn't exist, or file exists but not device, return false with no error. +// On other cases, return false with error from Lstat(). +func (v VolumePathHandler) IsDeviceBindMountExist(mapPath string) (bool, error) { + fi, err := os.Lstat(mapPath) + if err != nil { + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil + } + + // Return error from Lstat() + return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err) + } + // If file exits and it's device, return true and no error + if fi.Mode()&os.ModeDevice == os.ModeDevice { + return true, nil + } + // If file exits but it's not device, return false and no error + return false, nil +} + +// GetDeviceBindMountRefs searches bind mounts under global map path +func (v VolumePathHandler) GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error) { var refs []string files, err := ioutil.ReadDir(mapPath) if err != nil { - return nil, fmt.Errorf("Directory cannot read %v", err) + return nil, fmt.Errorf("directory cannot read %v", err) } for _, file := range files { - if file.Mode()&os.ModeSymlink != os.ModeSymlink { + if file.Mode()&os.ModeDevice != os.ModeDevice { continue } filename := file.Name() - fp, err := os.Readlink(filepath.Join(mapPath, filename)) - if err != nil { - return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) - } - klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", fp, devPath) - if fp == devPath { - refs = append(refs, filepath.Join(mapPath, filename)) - } + // TODO: Might need to check if the file is actually linked to devPath + refs = append(refs, filepath.Join(mapPath, filename)) } - klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + klog.V(5).Infof("GetDeviceBindMountRefs: refs %v", refs) return refs, nil } - -// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath -// corresponding to map path symlink, and then return global map path with pod uuid. -// ex. 
mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX -// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX -func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { - var globalMapPathUUID string - // Find symbolic link named pod uuid under plugin dir - err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { - klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) - if res, err := compareSymlinks(path, mapPath); err == nil && res { - globalMapPathUUID = path - } - } - return nil - }) - if err != nil { - return "", err - } - klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) - // Return path contains global map path + {pod uuid} - return globalMapPathUUID, nil -} - -func compareSymlinks(global, pod string) (bool, error) { - devGlobal, err := os.Readlink(global) - if err != nil { - return false, err - } - devPod, err := os.Readlink(pod) - if err != nil { - return false, err - } - klog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) - if devGlobal == devPod { - return true, nil - } - return false, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index 7170edc7de0..66cc7c73f08 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -19,12 +19,17 @@ limitations under the License. package volumepathhandler import ( + "bufio" "errors" "fmt" "os" "os/exec" + "path/filepath" "strings" + "golang.org/x/sys/unix" + + "k8s.io/apimachinery/pkg/types" "k8s.io/klog" ) @@ -33,7 +38,7 @@ import ( func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { blockDevicePath, err := v.GetLoopDevice(path) if err != nil && err.Error() != ErrDeviceNotFound { - return "", err + return "", fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err) } // If no existing loop device for the path, create one @@ -41,12 +46,33 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { klog.V(4).Infof("Creating device for path: %s", path) blockDevicePath, err = makeLoopDevice(path) if err != nil { - return "", err + return "", fmt.Errorf("makeLoopDevice failed for path %s: %v", path, err) } } return blockDevicePath, nil } +// DetachFileDevice takes a path to the attached block device and +// detach it from block device. +func (v VolumePathHandler) DetachFileDevice(path string) error { + loopPath, err := v.GetLoopDevice(path) + if err != nil { + if err.Error() == ErrDeviceNotFound { + klog.Warningf("couldn't find loopback device which takes file descriptor lock. Skip detaching device. 
device path: %q", path) + } else { + return fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err) + } + } else { + if len(loopPath) != 0 { + err = removeLoopDevice(loopPath) + if err != nil { + return fmt.Errorf("removeLoopDevice failed for path %s: %v", path, err) + } + } + } + return nil +} + // GetLoopDevice returns the full path to the loop device associated with the given path. func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { _, err := os.Stat(path) @@ -62,9 +88,9 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { out, err := cmd.CombinedOutput() if err != nil { klog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) - return "", err + return "", fmt.Errorf("losetup -j %s failed: %v", path, err) } - return parseLosetupOutputForDevice(out) + return parseLosetupOutputForDevice(out, path) } func makeLoopDevice(path string) (string, error) { @@ -73,13 +99,20 @@ func makeLoopDevice(path string) (string, error) { out, err := cmd.CombinedOutput() if err != nil { klog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) - return "", err + return "", fmt.Errorf("losetup -f --show %s failed: %v", path, err) + } + + // losetup -f --show {path} returns device in the format: + // /dev/loop1 + if len(out) == 0 { + return "", errors.New(ErrDeviceNotFound) } - return parseLosetupOutputForDevice(out) + + return strings.TrimSpace(string(out)), nil } -// RemoveLoopDevice removes specified loopback device -func (v VolumePathHandler) RemoveLoopDevice(device string) error { +// removeLoopDevice removes specified loopback device +func removeLoopDevice(device string) error { args := []string{"-d", device} cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() @@ -88,21 +121,121 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error { return nil } klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) - return err + return fmt.Errorf("losetup -d %s failed: %v", device, err) } return nil } -func parseLosetupOutputForDevice(output []byte) (string, error) { +func parseLosetupOutputForDevice(output []byte, path string) (string, error) { if len(output) == 0 { return "", errors.New(ErrDeviceNotFound) } - // losetup returns device in the format: - // /dev/loop1: [0073]:148662 (/dev/sda) - device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0]) + // losetup -j {path} returns device in the format: + // /dev/loop1: [0073]:148662 ({path}) + // /dev/loop2: [0073]:148662 (/dev/sdX) + // + // losetup -j shows all the loop device for the same device that has the same + // major/minor number, by resolving symlink and matching major/minor number. + // Therefore, there will be other path than {path} in output, as shown in above output. + s := string(output) + // Find the line that exact matches to the path, or "({path})" + var matched string + scanner := bufio.NewScanner(strings.NewReader(s)) + for scanner.Scan() { + if strings.HasSuffix(scanner.Text(), "("+path+")") { + matched = scanner.Text() + break + } + } + if len(matched) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + s = matched + + // Get device name, or the 0th field of the output separated with ":". + // We don't need 1st field or later to be splitted, so passing 2 to SplitN. 
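parseLosetupOutputForDevice above now scans the `losetup -j` output for the line that ends with the exact backing-file path, because losetup reports every loop device whose backing file shares the same major/minor number. Here is a self-contained version of that parsing strategy; the sample output and paths are made up.

```go
// Self-contained sketch of the losetup -j parsing strategy described above.
package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

func parseLosetup(output, path string) (string, error) {
	scanner := bufio.NewScanner(strings.NewReader(output))
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasSuffix(line, "("+path+")") {
			continue // a different backing file with the same major/minor number
		}
		// Device name is the field before the first ":".
		device := strings.TrimSpace(strings.SplitN(line, ":", 2)[0])
		if device == "" {
			return "", errors.New("device not found")
		}
		return device, nil
	}
	return "", errors.New("device not found")
}

func main() {
	out := "/dev/loop1: [0073]:148662 (/var/lib/kubelet/plugins/file)\n" +
		"/dev/loop2: [0073]:148662 (/dev/sdX)\n"
	dev, err := parseLosetup(out, "/var/lib/kubelet/plugins/file")
	fmt.Println(dev, err) // /dev/loop1 <nil>
}
```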
+ device := strings.TrimSpace(strings.SplitN(s, ":", 2)[0]) if len(device) == 0 { return "", errors.New(ErrDeviceNotFound) } return device, nil } + +// FindGlobalMapPathUUIDFromPod finds {pod uuid} bind mount under globalMapPath +// corresponding to map path symlink, and then return global map path with pod uuid. +// (See pkg/volume/volume.go for details on a global map path and a pod device map path.) +// ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX +// globalMapPath/{pod uuid} bind mount: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + var globalMapPathUUID string + // Find symbolic link named pod uuid under plugin dir + err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if (fi.Mode()&os.ModeDevice == os.ModeDevice) && (fi.Name() == string(podUID)) { + klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + if res, err := compareBindMountAndSymlinks(path, mapPath); err == nil && res { + globalMapPathUUID = path + } + } + return nil + }) + if err != nil { + return "", fmt.Errorf("FindGlobalMapPathUUIDFromPod failed: %v", err) + } + klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + // Return path contains global map path + {pod uuid} + return globalMapPathUUID, nil +} + +// compareBindMountAndSymlinks returns if global path (bind mount) and +// pod path (symlink) are pointing to the same device. +// If there is an error in checking it returns error. +func compareBindMountAndSymlinks(global, pod string) (bool, error) { + // To check if bind mount and symlink are pointing to the same device, + // we need to check if they are pointing to the devices that have same major/minor number. 
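The major/minor comparison described in these comments can be reproduced with golang.org/x/sys/unix directly. A Linux-only illustration; the function names are illustrative, and because unix.Stat follows symlinks the pod-path symlink does not need an explicit Readlink in this simplified form:

package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// deviceNumber returns the backing device of path as a "major:minor" hex string.
// unix.Stat resolves symlinks, so passing either a bind-mounted device file or a
// symlink to /dev/sdX yields the underlying block device's numbers.
func deviceNumber(path string) (string, error) {
	var st unix.Stat_t
	if err := unix.Stat(path, &st); err != nil {
		return "", fmt.Errorf("stat %s: %v", path, err)
	}
	dev := uint64(st.Rdev)
	return fmt.Sprintf("%x:%x", unix.Major(dev), unix.Minor(dev)), nil
}

// sameBlockDevice reports whether the global bind mount and the pod symlink
// refer to the same block device, by comparing major/minor numbers.
func sameBlockDevice(globalPath, podPath string) (bool, error) {
	g, err := deviceNumber(globalPath)
	if err != nil {
		return false, err
	}
	p, err := deviceNumber(podPath)
	if err != nil {
		return false, err
	}
	return g == p, nil
}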
+ + // Get the major/minor number for global path + devNumGlobal, err := getDeviceMajorMinor(global) + if err != nil { + return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", global, err) + } + + // Get the symlinked device from the pod path + devPod, err := os.Readlink(pod) + if err != nil { + return false, fmt.Errorf("failed to readlink path %s: %v", pod, err) + } + // Get the major/minor number for the symlinked device from the pod path + devNumPod, err := getDeviceMajorMinor(devPod) + if err != nil { + return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", devPod, err) + } + klog.V(5).Infof("CompareBindMountAndSymlinks: devNumGlobal %s, devNumPod %s", devNumGlobal, devNumPod) + + // Check if the major/minor number are the same + if devNumGlobal == devNumPod { + return true, nil + } + return false, nil +} + +// getDeviceMajorMinor returns major/minor number for the path with below format: +// major:minor (in hex) +// ex) +// fc:10 +func getDeviceMajorMinor(path string) (string, error) { + var stat unix.Stat_t + + if err := unix.Stat(path, &stat); err != nil { + return "", fmt.Errorf("failed to stat path %s: %v", path, err) + } + + devNumber := uint64(stat.Rdev) + major := unix.Major(devNumber) + minor := unix.Minor(devNumber) + + return fmt.Sprintf("%x:%x", major, minor), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go index 266398b1da8..f4411c6cdb7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go @@ -20,6 +20,8 @@ package volumepathhandler import ( "fmt" + + "k8s.io/apimachinery/pkg/types" ) // AttachFileDevice takes a path to a regular file and makes it available as an @@ -28,12 +30,19 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { return "", fmt.Errorf("AttachFileDevice not supported for this build.") } +// DetachFileDevice takes a path to the attached block device and +// detach it from block device. +func (v VolumePathHandler) DetachFileDevice(path string) error { + return fmt.Errorf("DetachFileDevice not supported for this build.") +} + // GetLoopDevice returns the full path to the loop device associated with the given path. func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { return "", fmt.Errorf("GetLoopDevice not supported for this build.") } -// RemoveLoopDevice removes specified loopback device -func (v VolumePathHandler) RemoveLoopDevice(device string) error { - return fmt.Errorf("RemoveLoopDevice not supported for this build.") +// FindGlobalMapPathUUIDFromPod finds {pod uuid} bind mount under globalMapPath +// corresponding to map path symlink, and then return global map path with pod uuid. 
+func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + return "", fmt.Errorf("FindGlobalMapPathUUIDFromPod not supported for this build.") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume.go index cb78d94515a..31fa8216e3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -41,12 +41,12 @@ type Volume interface { // and pod device map path. type BlockVolume interface { // GetGlobalMapPath returns a global map path which contains - // symbolic links associated to a block device. + // bind mount associated to a block device. // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} GetGlobalMapPath(spec *Spec) (string, error) // GetPodDeviceMapPath returns a pod device map path // and name of a symbolic link associated to a block device. - // ex. pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + // ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/, {volumeName} GetPodDeviceMapPath() (string, string) } @@ -152,35 +152,43 @@ type Unmounter interface { TearDownAt(dir string) error } -// BlockVolumeMapper interface provides methods to set up/map the volume. +// BlockVolumeMapper interface is a mapper interface for block volume. type BlockVolumeMapper interface { BlockVolume - // SetUpDevice prepares the volume to a self-determined directory path, - // which may or may not exist yet and returns combination of physical - // device path of a block volume and error. - // If the plugin is non-attachable, it should prepare the device - // in /dev/ (or where appropriate) and return unique device path. - // Unique device path across kubelet node reboot is required to avoid - // unexpected block volume destruction. - // If the plugin is attachable, it should not do anything here, - // just return empty string for device path. - // Instead, attachable plugin have to return unique device path - // at attacher.Attach() and attacher.WaitForAttach(). +} + +// CustomBlockVolumeMapper interface provides custom methods to set up/map the volume. +type CustomBlockVolumeMapper interface { + BlockVolumeMapper + // SetUpDevice prepares the volume to the node by the plugin specific way. + // For most in-tree plugins, attacher.Attach() and attacher.WaitForAttach() + // will do necessary works. // This may be called more than once, so implementations must be idempotent. - SetUpDevice() (string, error) + SetUpDevice() error - // Map maps the block device path for the specified spec and pod. - MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error + // MapPodDevice maps the block device to a path and return the path. + // Unique device path across kubelet node reboot is required to avoid + // unexpected block volume destruction. + // If empty string is returned, the path retuned by attacher.Attach() and + // attacher.WaitForAttach() will be used. + MapPodDevice() (string, error) } -// BlockVolumeUnmapper interface provides methods to cleanup/unmap the volumes. +// BlockVolumeUnmapper interface is an unmapper interface for block volume. type BlockVolumeUnmapper interface { BlockVolume - // TearDownDevice removes traces of the SetUpDevice procedure under - // a self-determined directory. 
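A likely way callers consume the CustomBlockVolumeMapper split introduced above is an optional type assertion: hold the plain BlockVolumeMapper and probe for the richer interface only when plugin-specific setup is needed. A hedged sketch against the interfaces exactly as defined in this hunk (the helper itself is illustrative, not vendored code):

package example

import "k8s.io/kubernetes/pkg/volume"

// setUpBlockVolume demonstrates the optional-interface pattern: every plugin
// provides a volume.BlockVolumeMapper, and only plugins that also implement
// volume.CustomBlockVolumeMapper get the SetUpDevice/MapPodDevice calls.
// An empty path from MapPodDevice means the attacher-provided path is used.
func setUpBlockVolume(m volume.BlockVolumeMapper) (string, error) {
	custom, ok := m.(volume.CustomBlockVolumeMapper)
	if !ok {
		// Plain mappers rely entirely on attacher.Attach()/WaitForAttach().
		return "", nil
	}
	if err := custom.SetUpDevice(); err != nil {
		return "", err
	}
	return custom.MapPodDevice()
}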
+} + +// CustomBlockVolumeUnmapper interface provides custom methods to cleanup/unmap the volumes. +type CustomBlockVolumeUnmapper interface { + BlockVolumeUnmapper + // TearDownDevice removes traces of the SetUpDevice procedure. // If the plugin is non-attachable, this method detaches the volume // from a node. TearDownDevice(mapPath string, devicePath string) error + + // UnmapPodDevice removes traces of the MapPodDevice procedure. + UnmapPodDevice() error } // Provisioner is an interface that creates templates for PersistentVolumes diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index 18981b9e9f8..5108eb00da8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -42,6 +42,8 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { return nil } + klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath()) + return filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error { if err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD index 4995fc2ca93..b4bab5c5ef9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD @@ -21,7 +21,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume", deps = [ "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", @@ -36,6 +35,7 @@ go_library( "//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], ) @@ -49,7 +49,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -61,6 +60,7 @@ go_test( "//staging/src/k8s.io/legacy-cloud-providers/vsphere:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS index 816453043f4..1ab66a94490 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS @@ -4,13 +4,13 @@ approvers: - pmorie - saad-ali - thockin -- matchstick - SandeepPissay - divyenpatel - BaluDontu - abrarshivani +emeritus_approvers: +- matchstick reviewers: -- abithap - abrarshivani - saad-ali - justinsb diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go index bce39b701ae..8c247167858 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go @@ -25,14 +25,15 @@ import ( "runtime" "time" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/keymutex" + "k8s.io/utils/mount" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/legacy-cloud-providers/vsphere" - "k8s.io/utils/keymutex" ) type vsphereVMDKAttacher struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index b6de7fba54a..2d023da3b1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -25,18 +25,19 @@ import ( "runtime" "strings" - "k8s.io/api/core/v1" + "k8s.io/klog" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - utilstrings "k8s.io/utils/strings" ) // This is the primary entrypoint for volume plugins. 
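These vsphere hunks only swap the import path from k8s.io/kubernetes/pkg/util/mount to the extracted k8s.io/utils/mount module; call sites keep the same mounter surface. A small sketch of the new import in use, assuming the package retains the familiar New/IsLikelyNotMountPoint API it inherited from pkg/util/mount:

package example

import (
	"k8s.io/utils/mount"
)

// isMountPoint shows the only change most callers need after the move: the
// import path. New("") returns the default OS mounter, and IsLikelyNotMountPoint
// is the same helper the in-tree plugins already relied on.
func isMountPoint(dir string) (bool, error) {
	mounter := mount.New("")
	notMnt, err := mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return false, err
	}
	return !notMnt, nil
}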
@@ -85,10 +86,6 @@ func (plugin *vsphereVolumePlugin) CanSupport(spec *volume.Spec) bool { (spec.Volume != nil && spec.Volume.VsphereVolume != nil) } -func (plugin *vsphereVolumePlugin) IsMigratedToCSI() bool { - return false -} - func (plugin *vsphereVolumePlugin) RequiresRemount() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go index b660cd3cd4d..657fd611ebe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go @@ -23,14 +23,15 @@ import ( "path/filepath" "strings" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" + utilstrings "k8s.io/utils/strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - utilstrings "k8s.io/utils/strings" ) var _ volume.BlockVolumePlugin = &vsphereVolumePlugin{} @@ -49,10 +50,10 @@ func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, vo if len(globalMapPath) <= 1 { return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Construct volume spec from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -64,6 +65,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock vsphereVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ @@ -129,24 +133,12 @@ type vsphereBlockVolumeMapper struct { *vsphereVolume } -func (v vsphereBlockVolumeMapper) SetUpDevice() (string, error) { - return "", nil -} - -func (v vsphereBlockVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { - return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) -} - var _ volume.BlockVolumeUnmapper = &vsphereBlockVolumeUnmapper{} type vsphereBlockVolumeUnmapper struct { *vsphereVolume } -func (v *vsphereBlockVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error { - return nil -} - // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumePath func (v *vsphereVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_linux.go index 9e30e1c1442..3ad003b880f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_linux.go 
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_linux.go @@ -1,3 +1,4 @@ +// +build !providerless // +build linux /* @@ -21,7 +22,7 @@ package vsphere_volume import ( "fmt" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/mount" ) func verifyDevicePath(path string) (string, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_unsupported.go index 2f77a1bc64d..10de28ce16c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_unsupported.go @@ -1,3 +1,4 @@ +// +build !providerless // +build !linux,!windows /* diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go index 5b075482563..a57b176371e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go @@ -1,3 +1,4 @@ +// +build !providerless // +build windows /* diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go index 47961b842ef..35d8e19bbc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go @@ -83,7 +83,7 @@ Loop: s <- c.CurrentStatus case svc.Stop, svc.Shutdown: klog.Infof("Service stopping") - // We need to translate this request into a signal that can be handled by the the signal handler + // We need to translate this request into a signal that can be handled by the signal handler // handling shutdowns normally (currently apiserver/pkg/server/signal.go). // If we do not do this, our main threads won't be notified of the upcoming shutdown. 
// Since Windows services do not use any console, we cannot simply generate a CTRL_BREAK_EVENT diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/admission_webhook.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/admission_webhook.go index c26556e9781..e4d7d11641c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/admission_webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/admission_webhook.go @@ -86,26 +86,52 @@ func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview) } // LocalhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var LocalhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBjzCCATmgAwIBAgIRAKpi2WmTcFrVjxrl5n5YDUEwDQYJKoZIhvcNAQELBQAw -EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2 -MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgC -QQC9fEbRszP3t14Gr4oahV7zFObBI4TfA5i7YnlMXeLinb7MnvT4bkfOJzE6zktn -59zP7UiHs3l4YOuqrjiwM413AgMBAAGjaDBmMA4GA1UdDwEB/wQEAwICpDATBgNV -HSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MC4GA1UdEQQnMCWCC2V4 -YW1wbGUuY29thwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUA -A0EAUsVE6KMnza/ZbodLlyeMzdo7EM/5nb5ywyOxgIOCf0OOLHsPS9ueGLQX9HEG -//yjTXuhNcUugExIjM/AIwAZPQ== +MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw +MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqWLX6S +4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOTheZ+ +3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuNr3X9 +erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyUVY/T +cukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+2EFa +a8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABo2gwZjAOBgNVHQ8BAf8EBAMC +AqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAuBgNVHREE +JzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG +9w0BAQsFAAOCAQEAThqgJ/AFqaANsOp48lojDZfZBFxJQ3A4zfR/MgggUoQ9cP3V +rxuKAFWQjze1EZc7J9iO1WvH98lOGVNRY/t2VIrVoSsBiALP86Eew9WucP60tbv2 +8/zsBDSfEo9Wl+Q/gwdEh8dgciUKROvCm76EgAwPGicMAgRsxXgwXHhS5e8nnbIE +Ewaqvb5dY++6kh0Oz+adtNT5OqOwXTIRI67WuEe6/B3Z4LNVPQDIj7ZUJGNw8e6L +F4nkUthwlKx4yEJHZBRuFPnO7Z81jNKuwL276+mczRH7piI6z9uyMV/JbEsOIxyL +W6CzB7pZ9Nj1YLpgzc1r6oONHLokMJJIz/IvkQ== -----END CERTIFICATE-----`) // LocalhostKey is the private key for LocalhostCert. 
var LocalhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBAL18RtGzM/e3XgavihqFXvMU5sEjhN8DmLtieUxd4uKdvsye9Phu -R84nMTrOS2fn3M/tSIezeXhg66quOLAzjXcCAwEAAQJBAKcRxH9wuglYLBdI/0OT -BLzfWPZCEw1vZmMR2FF1Fm8nkNOVDPleeVGTWoOEcYYlQbpTmkGSxJ6ya+hqRi6x -goECIQDx3+X49fwpL6B5qpJIJMyZBSCuMhH4B7JevhGGFENi3wIhAMiNJN5Q3UkL -IuSvv03kaPR5XVQ99/UeEetUgGvBcABpAiBJSBzVITIVCGkGc7d+RCf49KTCIklv -bGWObufAR8Ni4QIgWpILjW8dkGg8GOUZ0zaNA6Nvt6TIv2UWGJ4v5PoV98kCIQDx -rIiZs5QbKdycsv9gQJzwQAogC8o04X3Zz3dsoX+h4A== +MIIEowIBAAKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqW +LX6S4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOT +heZ+3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuN +r3X9erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyU +VY/TcukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+ +2EFaa8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABAoIBAFAJmb1pMIy8OpFO +hnOcYWoYepe0vgBiIOXJy9n8R7vKQ1X2f0w+b3SHw6eTd1TLSjAhVIEiJL85cdwD +MRTdQrXA30qXOioMzUa8eWpCCHUpD99e/TgfO4uoi2dluw+pBx/WUyLnSqOqfLDx +S66kbeFH0u86jm1hZibki7pfxLbxvu7KQgPe0meO5/13Retztz7/xa/pWIY71Zqd +YC8UckuQdWUTxfuQf0470lAK34GZlDy9tvdVOG/PmNkG4j6OQjy0Kmz4Uk7rewKo +ZbdphaLPJ2A4Rdqfn4WCoyDnxlfV861T922/dEDZEbNWiQpB81G8OfLL+FLHxyIT +LKEu4R0CgYEA4RDj9jatJ/wGkMZBt+UF05mcJlRVMEijqdKgFwR2PP8b924Ka1mj +9zqWsfbxQbdPdwsCeVBZrSlTEmuFSQLeWtqBxBKBTps/tUP0qZf7HjfSmcVI89WE +3ab8LFjfh4PtK/LOq2D1GRZZkFliqi0gKwYdDoK6gxXWwrumXq4c2l8CgYEA8vrX +dMuGCNDjNQkGXx3sr8pyHCDrSNR4Z4FrSlVUkgAW1L7FrCM911BuGh86FcOu9O/1 +Ggo0E8ge7qhQiXhB5vOo7hiVzSp0FxxCtGSlpdp4W6wx6ZWK8+Pc+6Moos03XdG7 +MKsdPGDciUn9VMOP3r8huX/btFTh90C/L50sH/cCgYAd02wyW8qUqux/0RYydZJR +GWE9Hx3u+SFfRv9aLYgxyyj8oEOXOFjnUYdY7D3KlK1ePEJGq2RG81wD6+XM6Clp +Zt2di0pBjYdi0S+iLfbkaUdqg1+ImLoz2YY/pkNxJQWQNmw2//FbMsAJxh6yKKrD +qNq+6oonBwTf55hDodVHBwKBgEHgEBnyM9ygBXmTgM645jqiwF0v75pHQH2PcO8u +Q0dyDr6PGjiZNWLyw2cBoFXWP9DYXbM5oPTcBMbfizY6DGP5G4uxzqtZHzBE0TDn +OKHGoWr5PG7/xDRrSrZOfe3lhWVCP2XqfnqoKCJwlOYuPws89n+8UmyJttm6DBt0 +mUnxAoGBAIvbR87ZFXkvqstLs4KrdqTz4TQIcpzB3wENukHODPA6C1gzWTqp+OEe +GMNltPfGCLO+YmoMQuTpb0kECYV3k4jR3gXO6YvlL9KbY+UOA6P0dDX4ROi2Rklj +yh+lxFLYa1vlzzi9r8B7nkR9hrOGMvkfXF42X89g7lx4uMtu2I4q -----END RSA PRIVATE KEY-----`) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit.go index 3bb9c5b4322..0156303f60b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit.go @@ -137,7 +137,6 @@ func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, er // existingEvents holds a slice of audit events that have been seen existingEvents := []AuditEvent{} duplicates := auditinternal.EventList{} - var err error for _, e := range el.Items { event, err := testEventFromInternal(&e) if err != nil { @@ -147,12 +146,17 @@ func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, er for _, existing := range existingEvents { if reflect.DeepEqual(existing, event) { duplicates.Items = append(duplicates.Items, e) - err = fmt.Errorf("failed duplicate check") continue } } existingEvents = append(existingEvents, event) } + + var err error + if len(duplicates.Items) > 0 { + err = fmt.Errorf("failed duplicate check") + } + return duplicates, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit_dynamic.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit_dynamic.go index fdb85e4161c..d4a58dee314 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit_dynamic.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/audit_dynamic.go @@ -95,8 +95,6 @@ func (a *AuditTestServer) WaitForEvents(expected []AuditEvent) ([]AuditEvent, er var missing []AuditEvent err := wait.PollImmediate(50*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { var err error - a.LockedEventList.RLock() - defer a.LockedEventList.RUnlock() el := a.GetEventList() if len(el.Items) < 1 { return false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go index 3c375a3991a..d5bee2dfdee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -111,25 +111,27 @@ type RunObjectConfig interface { } type RCConfig struct { - Affinity *v1.Affinity - Client clientset.Interface - ScalesGetter scaleclient.ScalesGetter - Image string - Command []string - Name string - Namespace string - PollInterval time.Duration - Timeout time.Duration - PodStatusFile *os.File - Replicas int - CpuRequest int64 // millicores - CpuLimit int64 // millicores - MemRequest int64 // bytes - MemLimit int64 // bytes - GpuLimit int64 // count - ReadinessProbe *v1.Probe - DNSPolicy *v1.DNSPolicy - PriorityClassName string + Affinity *v1.Affinity + Client clientset.Interface + ScalesGetter scaleclient.ScalesGetter + Image string + Command []string + Name string + Namespace string + PollInterval time.Duration + Timeout time.Duration + PodStatusFile *os.File + Replicas int + CpuRequest int64 // millicores + CpuLimit int64 // millicores + MemRequest int64 // bytes + MemLimit int64 // bytes + GpuLimit int64 // count + ReadinessProbe *v1.Probe + DNSPolicy *v1.DNSPolicy + PriorityClassName string + TerminationGracePeriodSeconds *int64 + Lifecycle *v1.Lifecycle // Env vars, set the same for every pod. Env map[string]string @@ -159,6 +161,9 @@ type RCConfig struct { // Maximum allowable container failures. If exceeded, RunRC returns an error. // Defaults to replicas*0.1 if unspecified. MaxContainerFailures *int + // Maximum allowed pod deletions count. If exceeded, RunRC returns an error. + // Defaults to 0. + MaxAllowedPodDeletions int // If set to false starting RC will print progress, otherwise only errors will be printed. 
Silent bool @@ -321,13 +326,15 @@ func (config *DeploymentConfig) create() error { Annotations: config.Annotations, }, Spec: v1.PodSpec{ - Affinity: config.Affinity, + Affinity: config.Affinity, + TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Lifecycle: config.Lifecycle, }, }, }, @@ -401,13 +408,15 @@ func (config *ReplicaSetConfig) create() error { Annotations: config.Annotations, }, Spec: v1.PodSpec{ - Affinity: config.Affinity, + Affinity: config.Affinity, + TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Lifecycle: config.Lifecycle, }, }, }, @@ -473,12 +482,14 @@ func (config *JobConfig) create() error { Annotations: config.Annotations, }, Spec: v1.PodSpec{ - Affinity: config.Affinity, + Affinity: config.Affinity, + TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Lifecycle: config.Lifecycle, }, }, RestartPolicy: v1.RestartPolicyOnFailure, @@ -593,12 +604,13 @@ func (config *RCConfig) create() error { Command: config.Command, Ports: []v1.ContainerPort{{ContainerPort: 80}}, ReadinessProbe: config.ReadinessProbe, + Lifecycle: config.Lifecycle, }, }, DNSPolicy: *config.DNSPolicy, NodeSelector: config.NodeSelector, Tolerations: config.Tolerations, - TerminationGracePeriodSeconds: &one, + TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(&one), PriorityClassName: config.PriorityClassName, }, }, @@ -675,6 +687,9 @@ func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) { if config.GpuLimit > 0 { template.Spec.Containers[0].Resources.Limits["nvidia.com/gpu"] = *resource.NewQuantity(config.GpuLimit, resource.DecimalSI) } + if config.Lifecycle != nil { + template.Spec.Containers[0].Lifecycle = config.Lifecycle + } if len(config.Volumes) > 0 { template.Spec.Volumes = config.Volumes } @@ -783,6 +798,7 @@ func (config *RCConfig) start() error { oldPods := make([]*v1.Pod, 0) oldRunning := 0 lastChange := time.Now() + podDeletionsCount := 0 for oldRunning != config.Replicas { time.Sleep(interval) @@ -813,9 +829,10 @@ func (config *RCConfig) start() error { diff := Diff(oldPods, pods) deletedPods := diff.DeletedPods() - if len(deletedPods) != 0 { - // There are some pods that have disappeared. 
- err := fmt.Errorf("%d pods disappeared for %s: %v", len(deletedPods), config.Name, strings.Join(deletedPods, ", ")) + podDeletionsCount += len(deletedPods) + if podDeletionsCount > config.MaxAllowedPodDeletions { + // Number of pods which disappeared is over threshold + err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", ")) config.RCConfigLog(err.Error()) config.RCConfigLog(diff.String(sets.NewString())) return err @@ -1537,6 +1554,13 @@ func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) { template.Spec.Containers[0].VolumeMounts = mounts } +func (config *RCConfig) getTerminationGracePeriodSeconds(defaultGrace *int64) *int64 { + if config.TerminationGracePeriodSeconds == nil || *config.TerminationGracePeriodSeconds < 0 { + return defaultGrace + } + return config.TerminationGracePeriodSeconds +} + func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name string) { template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/BUILD index 1d7542aaf41..0a4b5b69107 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/BUILD @@ -66,7 +66,6 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go index 3fc5cdcab65..4320d8861cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go @@ -47,7 +47,7 @@ import ( "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/sts" "gopkg.in/gcfg.v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" @@ -63,7 +63,7 @@ import ( "k8s.io/client-go/pkg/version" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - "k8s.io/cloud-provider" + cloudprovider "k8s.io/cloud-provider" nodehelpers "k8s.io/cloud-provider/node/helpers" servicehelpers "k8s.io/cloud-provider/service/helpers" cloudvolume "k8s.io/cloud-provider/volume" @@ -219,13 +219,15 @@ const nodeWithImpairedVolumes = "NodeWithImpairedVolumes" const ( // volumeAttachmentConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach volumeAttachmentStatusConsecutiveErrorLimit = 10 - // most attach/detach operations on AWS finish within 1-4 seconds - // By using 1 second starting interval with a backoff of 1.8 - // we get - [1, 1.8, 3.24, 5.832000000000001, 10.4976] - // in total we wait for 2601 seconds - volumeAttachmentStatusInitialDelay = 1 * time.Second - volumeAttachmentStatusFactor = 1.8 - volumeAttachmentStatusSteps = 13 + + // Attach typically takes 2-5 seconds (average is 2). Asking before 2 seconds is just waste of API quota. 
+ volumeAttachmentStatusInitialDelay = 2 * time.Second + // Detach typically takes 5-10 seconds (average is 6). Asking before 5 seconds is just waste of API quota. + volumeDetachmentStatusInitialDelay = 5 * time.Second + // After the initial delay, poll attach/detach with exponential backoff (2046 seconds total) + volumeAttachmentStatusPollDelay = 2 * time.Second + volumeAttachmentStatusFactor = 2 + volumeAttachmentStatusSteps = 11 // createTag* is configuration of exponential backoff for CreateTag call. We // retry mainly because if we create an object, we cannot tag it until it is @@ -1404,14 +1406,35 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No return nil, fmt.Errorf("error querying AWS metadata for %q: %q", "network/interfaces/macs", err) } + // We want the IPs to end up in order by interface (in particular, we want eth0's + // IPs first), but macs isn't necessarily sorted in that order so we have to + // explicitly order by device-number (device-number == the "0" in "eth0"). + macIPs := make(map[int]string) for _, macID := range strings.Split(macs, "\n") { if macID == "" { continue } - macPath := path.Join("network/interfaces/macs/", macID, "local-ipv4s") - internalIPs, err := c.metadata.GetMetadata(macPath) + numPath := path.Join("network/interfaces/macs/", macID, "device-number") + numStr, err := c.metadata.GetMetadata(numPath) + if err != nil { + return nil, fmt.Errorf("error querying AWS metadata for %q: %q", numPath, err) + } + num, err := strconv.Atoi(strings.TrimSpace(numStr)) if err != nil { - return nil, fmt.Errorf("error querying AWS metadata for %q: %q", macPath, err) + klog.Warningf("Bad device-number %q for interface %s\n", numStr, macID) + continue + } + ipPath := path.Join("network/interfaces/macs/", macID, "local-ipv4s") + macIPs[num], err = c.metadata.GetMetadata(ipPath) + if err != nil { + return nil, fmt.Errorf("error querying AWS metadata for %q: %q", ipPath, err) + } + } + + for i := 0; i < len(macIPs); i++ { + internalIPs := macIPs[i] + if internalIPs == "" { + continue } for _, internalIP := range strings.Split(internalIPs, "\n") { if internalIP == "" { @@ -1840,6 +1863,7 @@ func (c *Cloud) getMountDevice( assign bool) (assigned mountDevice, alreadyAttached bool, err error) { deviceMappings := map[mountDevice]EBSVolumeID{} + volumeStatus := map[EBSVolumeID]string{} // for better logging of volume status for _, blockDevice := range info.BlockDeviceMappings { name := aws.StringValue(blockDevice.DeviceName) if strings.HasPrefix(name, "/dev/sd") { @@ -1851,6 +1875,10 @@ func (c *Cloud) getMountDevice( if len(name) < 1 || len(name) > 2 { klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) } + if blockDevice.Ebs != nil && blockDevice.Ebs.VolumeId != nil { + volumeStatus[EBSVolumeID(*blockDevice.Ebs.VolumeId)] = aws.StringValue(blockDevice.Ebs.Status) + } + deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) } @@ -1868,7 +1896,15 @@ func (c *Cloud) getMountDevice( for mountDevice, mappingVolumeID := range deviceMappings { if volumeID == mappingVolumeID { if assign { - klog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) + // DescribeInstances shows the volume as attached / detaching, while Kubernetes + // cloud provider thinks it's detached. + // This can happened when the volume has just been detached from the same node + // and AWS API returns stale data in this DescribeInstances ("eventual consistency"). 
+ // Fail the attachment and let A/D controller retry in a while, hoping that + // AWS API returns consistent result next time (i.e. the volume is detached). + status := volumeStatus[mappingVolumeID] + klog.Warningf("Got assignment call for already-assigned volume: %s@%s, volume status: %s", mountDevice, mappingVolumeID, status) + return mountDevice, false, fmt.Errorf("volume is still being detached from the node") } return mountDevice, true, nil } @@ -2071,7 +2107,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) // On success, it returns the last attachment state. func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, error) { backoff := wait.Backoff{ - Duration: volumeAttachmentStatusInitialDelay, + Duration: volumeAttachmentStatusPollDelay, Factor: volumeAttachmentStatusFactor, Steps: volumeAttachmentStatusSteps, } @@ -2080,6 +2116,12 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, // So we tolerate a limited number of failures. // But once we see more than 10 errors in a row, we return the error describeErrorCount := 0 + + // Attach/detach usually takes time. It does not make sense to start + // polling DescribeVolumes before some initial delay to let AWS + // process the request. + time.Sleep(getInitialAttachDetachDelay(status)) + var attachment *ec2.VolumeAttachment err := wait.ExponentialBackoff(backoff, func() (bool, error) { @@ -2143,7 +2185,6 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) return false, nil }) - return attachment, err } @@ -4607,3 +4648,10 @@ func setNodeDisk( } volumeMap[volumeID] = check } + +func getInitialAttachDetachDelay(status string) time.Duration { + if status == "detached" { + return volumeDetachmentStatusInitialDelay + } + return volumeAttachmentStatusInitialDelay +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go index 05a2ad6c00e..ef77b502444 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go @@ -19,6 +19,8 @@ limitations under the License. package aws import ( + "fmt" + "sort" "strings" "github.com/aws/aws-sdk-go/aws" @@ -335,7 +337,13 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) { return aws.StringValue(i.PublicIpAddress), nil } else if strings.HasPrefix(key, networkInterfacesPrefix) { if key == networkInterfacesPrefix { - return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil + // Return the MACs sorted lexically rather than in device-number + // order; this matches AWS's observed behavior and lets us test + // that we fix up the ordering correctly in NodeAddresses(). 
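The NodeAddresses change above gathers per-interface IPs keyed by the metadata device-number so that eth0's addresses come first regardless of MAC enumeration order. A minimal sketch of that ordering step; getMeta is a hypothetical stand-in for the EC2 metadata client:

package example

import (
	"path"
	"sort"
	"strconv"
	"strings"
)

// ipsByDeviceNumber returns each interface's local-ipv4s blob ordered by the
// interface's device-number. Interfaces with an unparsable device-number are
// skipped, mirroring the warning-and-continue behaviour above.
func ipsByDeviceNumber(macs []string, getMeta func(key string) (string, error)) ([]string, error) {
	byNum := map[int]string{}
	for _, mac := range macs {
		numStr, err := getMeta(path.Join("network/interfaces/macs/", mac, "device-number"))
		if err != nil {
			return nil, err
		}
		num, err := strconv.Atoi(strings.TrimSpace(numStr))
		if err != nil {
			continue
		}
		ips, err := getMeta(path.Join("network/interfaces/macs/", mac, "local-ipv4s"))
		if err != nil {
			return nil, err
		}
		byNum[num] = ips
	}
	nums := make([]int, 0, len(byNum))
	for n := range byNum {
		nums = append(nums, n)
	}
	sort.Ints(nums)
	ordered := make([]string, 0, len(nums))
	for _, n := range nums {
		ordered = append(ordered, byNum[n])
	}
	return ordered, nil
}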
+ macs := make([]string, len(m.aws.networkInterfacesMacs)) + copy(macs, m.aws.networkInterfacesMacs) + sort.Strings(macs) + return strings.Join(macs, "/\n") + "/\n", nil } keySplit := strings.Split(key, "/") @@ -347,6 +355,13 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) { } } } + if len(keySplit) == 5 && keySplit[4] == "device-number" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return fmt.Sprintf("%d\n", i), nil + } + } + } if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { for i, macElem := range m.aws.networkInterfacesMacs { if macParam == macElem { diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go index 90153ec35ce..6d6789edbd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go @@ -21,8 +21,6 @@ package aws import ( "sync" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -55,14 +53,14 @@ var ( func recordAWSMetric(actionName string, timeTaken float64, err error) { if err != nil { - awsAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc() + awsAPIErrorMetric.With(metrics.Labels{"request": actionName}).Inc() } else { - awsAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(timeTaken) + awsAPIMetric.With(metrics.Labels{"request": actionName}).Observe(timeTaken) } } func recordAWSThrottlesMetric(operation string) { - awsAPIThrottlesMetric.With(prometheus.Labels{"operation_name": operation}).Inc() + awsAPIThrottlesMetric.With(metrics.Labels{"operation_name": operation}).Inc() } var registerOnce sync.Once diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/BUILD index d410cf087c2..df5b16f3b52 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/BUILD @@ -29,6 +29,7 @@ go_library( "azure_standard.go", "azure_storage.go", "azure_storageaccount.go", + "azure_utils.go", "azure_vmsets.go", "azure_vmss.go", "azure_vmss_cache.go", @@ -76,7 +77,6 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/utils/keymutex:go_default_library", "//vendor/k8s.io/utils/net:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], @@ -98,6 +98,7 @@ go_test( "azure_storage_test.go", "azure_storageaccount_test.go", "azure_test.go", + "azure_utils_test.go", "azure_vmss_cache_test.go", "azure_vmss_test.go", "azure_wrap_test.go", @@ -121,6 +122,7 @@ go_test( "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 81c586d03cb..63be11c2199 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ 
b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -34,6 +34,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -202,6 +203,8 @@ type Cloud struct { metadata *InstanceMetadataService vmSet VMSet + // ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates + ipv6DualStackEnabled bool // Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes. nodeCachesLock sync.Mutex // nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone @@ -260,6 +263,18 @@ func init() { // NewCloud returns a Cloud with initialized clients func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { + az, err := NewCloudWithoutFeatureGates(configReader) + if err != nil { + return nil, err + } + az.ipv6DualStackEnabled = utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) + + return az, nil +} + +// NewCloudWithoutFeatureGates returns a Cloud without trying to wire the feature gates. This is used by the unit tests +// that don't load the actual features being used in the cluster. +func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) { config, err := parseConfig(configReader) if err != nil { return nil, err @@ -271,6 +286,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { unmanagedNodes: sets.NewString(), routeCIDRs: map[string]string{}, } + err = az.InitializeCloudFromConfig(config, false) if err != nil { return nil, err @@ -598,6 +614,7 @@ func initDiskControllers(az *Cloud) error { resourceGroup: az.ResourceGroup, subscriptionID: az.SubscriptionID, cloud: az, + vmLockMap: newLockMap(), } az.BlobDiskController = &BlobDiskController{common: common} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index eeefe41c005..a45905f7a30 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -65,11 +65,11 @@ func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) { } // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry -func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { +func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt cacheReadType) (compute.VirtualMachine, error) { var machine compute.VirtualMachine var retryErr error err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - machine, retryErr = az.getVirtualMachine(name) + machine, retryErr = az.getVirtualMachine(name, crt) if retryErr == cloudprovider.InstanceNotFound { return true, cloudprovider.InstanceNotFound } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go index 58114416258..d6b5ee5e96f 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go @@ -300,10 +300,9 @@ func (c *BlobDiskController) 
getStorageAccountKey(SAName string) (string, error) klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) return *v.Value, nil } + c.accounts[SAName].key = *v.Value + return c.accounts[SAName].key, nil } - - c.accounts[SAName].key = *v.Value - return c.accounts[SAName].key, nil } return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go index 90bc7e163ad..38f21378b49 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go @@ -26,6 +26,20 @@ import ( "k8s.io/client-go/tools/cache" ) +// cacheReadType defines the read type for cache data +type cacheReadType int + +const ( + // cacheReadTypeDefault returns data from cache if cache entry not expired + // if cache entry expired, then it will refetch the data using getter + // save the entry in cache and then return + cacheReadTypeDefault cacheReadType = iota + // cacheReadTypeUnsafe returns data from cache even if the cache entry is + // active/expired. If entry doesn't exist in cache, then data is fetched + // using getter, saved in cache and returned + cacheReadTypeUnsafe +) + // getFunc defines a getter function for timedCache. type getFunc func(key string) (interface{}, error) @@ -36,6 +50,8 @@ type cacheEntry struct { // The lock to ensure not updating same entry simultaneously. lock sync.Mutex + // time when entry was fetched and created + createdOn time.Time } // cacheKeyFunc defines the key function required in TTLStore. @@ -48,6 +64,7 @@ type timedCache struct { store cache.Store lock sync.Mutex getter getFunc + ttl time.Duration } // newTimedcache creates a new timedCache. @@ -58,7 +75,11 @@ func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) { return &timedCache{ getter: getter, - store: cache.NewTTLStore(cacheKeyFunc, ttl), + // switch to using NewStore instead of NewTTLStore so that we can + // reuse entries for calls that are fine with reading expired/stalled data. + // with NewTTLStore, entries are not returned if they have already expired. + store: cache.NewStore(cacheKeyFunc), + ttl: ttl, }, nil } @@ -69,19 +90,15 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) { if err != nil { return nil, err } + // if entry exists, return the entry if exists { return entry.(*cacheEntry), nil } + // lock here to ensure if entry doesn't exist, we add a new entry + // avoiding overwrites t.lock.Lock() defer t.lock.Unlock() - entry, exists, err = t.store.GetByKey(key) - if err != nil { - return nil, err - } - if exists { - return entry.(*cacheEntry), nil - } // Still not found, add new entry with nil data. // Note the data will be filled later by getter. @@ -94,26 +111,38 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) { } // Get returns the requested item by key. -func (t *timedCache) Get(key string) (interface{}, error) { +func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) { entry, err := t.getInternal(key) if err != nil { return nil, err } - // Data is still not cached yet, cache it by getter. 
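The reworked azure_cache.go above keeps entries in a plain store forever and decides expiry at read time, so callers that can tolerate stale data may pass the unsafe read type and skip an ARM call. A condensed sketch of that pattern (type and field names are illustrative, not the vendored types):

package example

import (
	"sync"
	"time"
)

type readType int

const (
	readDefault readType = iota // refetch through the getter once the entry is older than ttl
	readUnsafe                  // return cached data even if it has expired
)

type entry struct {
	mu        sync.Mutex
	data      interface{}
	createdOn time.Time
}

type ttlCache struct {
	mu      sync.Mutex
	entries map[string]*entry
	ttl     time.Duration
	getter  func(key string) (interface{}, error)
}

func newTTLCache(ttl time.Duration, getter func(string) (interface{}, error)) *ttlCache {
	return &ttlCache{entries: map[string]*entry{}, ttl: ttl, getter: getter}
}

// Get locks the per-key entry so concurrent misses trigger only one getter call.
func (c *ttlCache) Get(key string, rt readType) (interface{}, error) {
	c.mu.Lock()
	e, ok := c.entries[key]
	if !ok {
		e = &entry{}
		c.entries[key] = e
	}
	c.mu.Unlock()

	e.mu.Lock()
	defer e.mu.Unlock()
	if e.data != nil && (rt == readUnsafe || time.Since(e.createdOn) < c.ttl) {
		return e.data, nil
	}
	data, err := c.getter(key)
	if err != nil {
		return nil, err
	}
	e.data = data
	e.createdOn = time.Now().UTC()
	return e.data, nil
}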
- if entry.data == nil { - entry.lock.Lock() - defer entry.lock.Unlock() + entry.lock.Lock() + defer entry.lock.Unlock() - if entry.data == nil { - data, err := t.getter(key) - if err != nil { - return nil, err - } - - entry.data = data + // entry exists + if entry.data != nil { + // allow unsafe read, so return data even if expired + if crt == cacheReadTypeUnsafe { + return entry.data, nil + } + // if cached data is not expired, return cached data + if time.Since(entry.createdOn) < t.ttl { + return entry.data, nil } } + // Data is not cached yet or cache data is expired, cache it by getter. + // entry is locked before getting to ensure concurrent gets don't result in + // multiple ARM calls. + data, err := t.getter(key) + if err != nil { + return nil, err + } + + // set the data in cache and also set the last update time + // to now as the data was recently fetched + entry.data = data + entry.createdOn = time.Now().UTC() return entry.data, nil } @@ -129,7 +158,8 @@ func (t *timedCache) Delete(key string) error { // It is only used for testing. func (t *timedCache) Set(key string, data interface{}) { t.store.Add(&cacheEntry{ - key: key, - data: data, + key: key, + data: data, + createdOn: time.Now().UTC(), }) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_client.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_client.go index a273776ca62..822bcf82f6b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_client.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_client.go @@ -106,8 +106,7 @@ type VirtualMachineScaleSetsClient interface { // VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient type VirtualMachineScaleSetVMsClient interface { - Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) - GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) + Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, err error) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) } @@ -191,7 +190,9 @@ func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) { // /* Write rate limiting */ + mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "VMCreateOrUpdate") return } @@ -201,7 +202,6 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) }() - mc := newMetricContext("vm", "create_or_update", resourceGroupName, 
az.client.SubscriptionID, source) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters) if err != nil { return future.Response(), err @@ -213,8 +213,10 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG } func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (resp *http.Response, err error) { + mc := newMetricContext("vm", "update", resourceGroupName, az.client.SubscriptionID, source) // /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "VMUpdate") return } @@ -224,7 +226,6 @@ func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName klog.V(10).Infof("azVirtualMachinesClient.Update(%q, %q): end", resourceGroupName, VMName) }() - mc := newMetricContext("vm", "update", resourceGroupName, az.client.SubscriptionID, source) future, err := az.client.Update(ctx, resourceGroupName, VMName, parameters) if err != nil { return future.Response(), err @@ -236,7 +237,9 @@ func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName } func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { + mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMGet") return } @@ -246,14 +249,15 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) }() - mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, VMName, expand) mc.Observe(err) return } func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) { + mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMList") return } @@ -263,7 +267,6 @@ func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName s klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -308,8 +311,10 @@ func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient { } func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) { + mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "NiCreateOrUpdate") return } @@ -319,7 +324,6 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) }() - mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := 
az.client.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters) if err != nil { return future.Response(), mc.Observe(err) @@ -330,7 +334,9 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN } func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { + mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "NicGet") return } @@ -340,14 +346,15 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() - mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, networkInterfaceName, expand) mc.Observe(err) return } func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { + mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface") return } @@ -357,7 +364,6 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx cont klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() - mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) mc.Observe(err) return @@ -389,8 +395,10 @@ func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient { } func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) (resp *http.Response, err error) { + mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "LBCreateOrUpdate") return nil, err } @@ -400,7 +408,6 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, parameters, etag) if err != nil { return nil, mc.Observe(err) @@ -443,8 +450,10 @@ func (az *azLoadBalancersClient) createOrUpdatePreparer(ctx context.Context, res } func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) { + mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if 
!az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "LBDelete") return nil, err } @@ -454,7 +463,6 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, loadBalancerName) if err != nil { return future.Response(), mc.Observe(err) @@ -465,7 +473,9 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s } func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { + mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "LBGet") return } @@ -475,14 +485,15 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, loadBalancerName, expand) mc.Observe(err) return } func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) { + mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err := createRateLimitErr(false, "LBList") return nil, err } @@ -492,7 +503,6 @@ func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName str klog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -537,8 +547,10 @@ func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesCl } func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) { + mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "PublicIPCreateOrUpdate") return nil, err } @@ -548,7 +560,6 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters) if err != nil { return future.Response(), mc.Observe(err) @@ -559,8 +570,10 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc } func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) { + mc := newMetricContext("public_ip_addresses", "delete", 
resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "PublicIPDelete") return nil, err } @@ -570,7 +583,6 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, publicIPAddressName) if err != nil { return future.Response(), mc.Observe(err) @@ -581,7 +593,9 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa } func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { + mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "PublicIPGet") return } @@ -591,14 +605,15 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, publicIPAddressName, expand) mc.Observe(err) return } func (az *azPublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { + mc := newMetricContext("vmss_public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSSPublicIPGet") return } @@ -608,14 +623,15 @@ func (az *azPublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ct klog.V(10).Infof("azPublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("vmss_public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand) mc.Observe(err) return } func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, error) { + mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() return nil, createRateLimitErr(false, "PublicIPList") } @@ -624,7 +640,6 @@ func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -669,8 +684,10 @@ func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient 
{ } func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) { + mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "SubnetCreateOrUpdate") return } @@ -680,7 +697,6 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) if err != nil { return future.Response(), mc.Observe(err) @@ -691,8 +707,10 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName } func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) { + mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "SubnetDelete") return } @@ -702,7 +720,6 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, virtualNetworkName, subnetName) if err != nil { return future.Response(), mc.Observe(err) @@ -713,7 +730,9 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, } func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { + mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "SubnetGet") return } @@ -723,14 +742,15 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, virtualNetworkName, subnetName, expand) mc.Observe(err) return } func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, error) { + mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() return nil, createRateLimitErr(false, "SubnetList") } @@ -739,7 +759,6 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() - mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualNetworkName) 
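The change repeated across these wrapped clients is the same one: newMetricContext is now created before the TryAccept check so that locally throttled calls can be counted via RateLimitedCount, while the actual ARM call is still reported through Observe. A minimal, self-contained sketch of that ordering; metricSink, limiter, and getWithMetrics are illustrative stand-ins, not this package's newMetricContext or flowcontrol types:

    package main

    import (
        "errors"
        "fmt"
    )

    // metricSink is a stand-in for the package's metricContext; only the two
    // calls exercised by this patch are modeled.
    type metricSink struct{ resource, request string }

    func (m *metricSink) RateLimitedCount() {
        fmt.Printf("rate_limited{%s,%s}++\n", m.resource, m.request)
    }

    func (m *metricSink) Observe(err error) error {
        fmt.Printf("observed{%s,%s} err=%v\n", m.resource, m.request, err)
        return err
    }

    // limiter is a stand-in for the flowcontrol rate limiter used by the real clients.
    type limiter struct{ allow bool }

    func (l *limiter) TryAccept() bool { return l.allow }

    // getWithMetrics shows the ordering the patch enforces: build the metric
    // context first, count the request if it is throttled locally, and only
    // otherwise perform (and observe) the backend call.
    func getWithMetrics(l *limiter, call func() error) error {
        mc := &metricSink{resource: "vm", request: "get"}
        if !l.TryAccept() {
            mc.RateLimitedCount()
            return errors.New("azure cloud provider rate limited(read) for operation VMGet")
        }
        return mc.Observe(call())
    }

    func main() {
        _ = getWithMetrics(&limiter{allow: false}, func() error { return nil }) // counted as rate limited
        _ = getWithMetrics(&limiter{allow: true}, func() error { return nil })  // observed normally
    }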
mc.Observe(err) if err != nil { @@ -784,8 +803,10 @@ func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient { } func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) (resp *http.Response, err error) { + mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "NSGCreateOrUpdate") return } @@ -795,7 +816,6 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) if err != nil { return nil, mc.Observe(err) @@ -838,8 +858,10 @@ func (az *azSecurityGroupsClient) createOrUpdatePreparer(ctx context.Context, re } func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) { + mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "NSGDelete") return } @@ -849,7 +871,6 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, networkSecurityGroupName) if err != nil { return future.Response(), mc.Observe(err) @@ -860,7 +881,9 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName } func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { + mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "NSGGet") return } @@ -870,14 +893,15 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, networkSecurityGroupName, expand) mc.Observe(err) return } func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, error) { + mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() return nil, createRateLimitErr(false, "NSGList") } @@ -886,7 +910,6 @@ func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName st klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("security_groups", "list", resourceGroupName, 
az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -931,7 +954,9 @@ func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachin } func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { + mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSSGet") return } @@ -941,14 +966,15 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() - mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName) mc.Observe(err) return } func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) { + mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSSList") return } @@ -958,7 +984,6 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -978,8 +1003,10 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro } func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) { + mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "NiCreateOrUpdate") return } @@ -989,7 +1016,6 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, vmScaleSetName) }() - mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, vmScaleSetName, parameters) if err != nil { return future.Response(), mc.Observe(err) @@ -1024,8 +1050,10 @@ func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMach } } -func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { +func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, err error) { + mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSSGet") return } @@ -1035,31 +1063,15 @@ func (az 
*azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID, "") - result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName, instanceID) - mc.Observe(err) - return -} - -func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { - if !az.rateLimiterReader.TryAccept() { - err = createRateLimitErr(false, "VMSSGetInstanceView") - return - } - - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) - }() - - mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID, "") - result, err = az.client.GetInstanceView(ctx, resourceGroupName, VMScaleSetName, instanceID) + result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand) mc.Observe(err) return } func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) { + mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSSList") return } @@ -1069,7 +1081,6 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() - mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) mc.Observe(err) if err != nil { @@ -1089,7 +1100,9 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG } func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) { + mc := newMetricContext("vmssvm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "VMSSVMUpdate") return } @@ -1099,7 +1112,6 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - mc := newMetricContext("vmssvm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) future, err := az.client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) if err != nil { return future.Response(), mc.Observe(err) @@ -1135,8 +1147,10 @@ func newAzRoutesClient(config *azClientConfig) *azRoutesClient { } func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName 
string, routeParameters network.Route, etag string) (resp *http.Response, err error) { + mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "RouteCreateOrUpdate") return } @@ -1146,7 +1160,6 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag) if err != nil { mc.Observe(err) @@ -1192,8 +1205,10 @@ func (az *azRoutesClient) createOrUpdatePreparer(ctx context.Context, resourceGr } func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) { + mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "RouteDelete") return } @@ -1203,7 +1218,6 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, routeTableName, routeName) if err != nil { return future.Response(), mc.Observe(err) @@ -1239,8 +1253,10 @@ func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient { } func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) (resp *http.Response, err error) { + mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "RouteTableCreateOrUpdate") return } @@ -1250,7 +1266,6 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() - mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, parameters, etag) if err != nil { return nil, mc.Observe(err) @@ -1293,7 +1308,9 @@ func (az *azRouteTablesClient) createOrUpdatePreparer(ctx context.Context, resou } func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { + mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "GetRouteTable") return } @@ -1303,7 +1320,6 @@ func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() - mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID, "") 
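The write paths above also share the Azure SDK long-running-operation shape: the initial call returns a future, the wrapper blocks on WaitForCompletionRef, and the final error flows into the metric context. A rough sketch of that flow against compute.DisksClient; the resource names and subscription ID are placeholders, and credential setup would normally come from the cloud provider config rather than environment-based auth:

    package main

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
        "github.com/Azure/go-autorest/autorest/azure/auth"
        "k8s.io/klog"
    )

    func deleteDisk(ctx context.Context, client compute.DisksClient, resourceGroup, diskName string) error {
        // Delete only starts the operation; the returned future tracks it.
        future, err := client.Delete(ctx, resourceGroup, diskName)
        if err != nil {
            return err
        }
        // Block until the long-running operation finishes (or ctx is cancelled),
        // mirroring the future.WaitForCompletionRef calls in azure_client.go.
        if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
            return err
        }
        klog.V(10).Infof("disk %s/%s deleted, status %s", resourceGroup, diskName, future.Response().Status)
        return nil
    }

    func main() {
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            klog.Fatal(err)
        }
        client := compute.NewDisksClient("<subscription-id>") // placeholder subscription ID
        client.Authorizer = authorizer
        if err := deleteDisk(context.Background(), client, "my-rg", "my-disk"); err != nil {
            klog.Errorf("delete failed: %v", err)
        }
    }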
result, err = az.client.Get(ctx, resourceGroupName, routeTableName, expand) mc.Observe(err) return @@ -1334,8 +1350,10 @@ func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient { } func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (result *http.Response, err error) { + mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "StorageAccountCreate") return } @@ -1345,7 +1363,6 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Create(ctx, resourceGroupName, accountName, parameters) if err != nil { return future.Response(), err @@ -1357,7 +1374,9 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName } func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "DeleteStorageAccount") return } @@ -1367,14 +1386,15 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Delete(ctx, resourceGroupName, accountName) mc.Observe(err) return } func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { + mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "ListStorageAccountKeys") return } @@ -1384,14 +1404,15 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID, "") - result, err = az.client.ListKeys(ctx, resourceGroupName, accountName) + result, err = az.client.ListKeys(ctx, resourceGroupName, accountName, storage.Kerb) mc.Observe(err) return } func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) { + mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "ListStorageAccountsByResourceGroup") return } @@ -1401,14 +1422,15 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() - mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, 
az.client.SubscriptionID, "") result, err = az.client.ListByResourceGroup(ctx, resourceGroupName) mc.Observe(err) return } func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) { + mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "GetStorageAccount/Properties") return } @@ -1418,7 +1440,6 @@ func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGro klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetProperties(ctx, resourceGroupName, accountName, "") mc.Observe(err) return @@ -1449,8 +1470,10 @@ func newAzDisksClient(config *azClientConfig) *azDisksClient { } func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) { + mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "DiskCreateOrUpdate") return } @@ -1460,7 +1483,6 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter) if err != nil { return future.Response(), mc.Observe(err) @@ -1471,8 +1493,10 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s } func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) { + mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(true, "DiskDelete") return } @@ -1482,18 +1506,18 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, diskName) if err != nil { return future.Response(), mc.Observe(err) } - err = future.WaitForCompletionRef(ctx, az.client.Client) return future.Response(), mc.Observe(err) } func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) { + mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "GetDisk") return } @@ -1503,7 +1527,6 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, 
diskName) mc.Observe(err) return @@ -1547,7 +1570,9 @@ func newAzVirtualMachineSizesClient(config *azClientConfig) *azVirtualMachineSiz } func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error) { + mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() err = createRateLimitErr(false, "VMSizesList") return } @@ -1557,7 +1582,6 @@ func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) }() - mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID, "") result, err = az.client.List(ctx, location) mc.Observe(err) return diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index 10518b05e2a..7382e9c69ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -23,6 +23,7 @@ import ( "fmt" "path" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" @@ -32,7 +33,6 @@ import ( cloudprovider "k8s.io/cloud-provider" volerr "k8s.io/cloud-provider/volume/errors" "k8s.io/klog" - "k8s.io/utils/keymutex" ) const ( @@ -57,19 +57,20 @@ var defaultBackOff = kwait.Backoff{ Jitter: 0.0, } -// acquire lock to attach/detach disk in one node -var diskOpMutex = keymutex.NewHashed(0) - type controllerCommon struct { subscriptionID string location string storageEndpointSuffix string resourceGroup string - cloud *Cloud + // store disk URI when disk is in attaching or detaching process + diskAttachDetachMap sync.Map + // vm disk map used to lock per vm update calls + vmLockMap *lockMap + cloud *Cloud } // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. -func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) { +func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt cacheReadType) (VMSet, error) { // 1. vmType is standard, return cloud.vmSet directly. if c.cloud.VMType == vmTypeStandard { return c.cloud.vmSet, nil @@ -82,7 +83,7 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) } // 3. If the node is managed by availability set, then return ss.availabilitySet. - managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName)) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt) if err != nil { return nil, err } @@ -98,6 +99,7 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI. 
// return (lun, error) func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) { + diskEncryptionSetID := "" if isManagedDisk { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) @@ -122,21 +124,26 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri danglingErr := volerr.NewDanglingError(attachErr, types.NodeName(attachedNode), "") return -1, danglingErr } + + if disk.DiskProperties != nil && disk.DiskProperties.Encryption != nil && + disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { + diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID + } } - vmset, err := c.getNodeVMSet(nodeName) + vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe) if err != nil { return -1, err } instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - klog.Warningf("failed to get azure instance id (%v)", err) + klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName) return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } - diskOpMutex.LockKey(instanceid) - defer diskOpMutex.UnlockKey(instanceid) + c.vmLockMap.LockEntry(strings.ToLower(string(nodeName))) + defer c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) lun, err := c.GetNextDiskLun(nodeName) if err != nil { @@ -145,35 +152,47 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName) - return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) + c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "attaching") + defer c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) + return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode, diskEncryptionSetID) } // DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. 
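The controllerCommon changes above replace the global keymutex with two pieces of per-key state: vmLockMap serializes attach/detach updates per node, and diskAttachDetachMap records which disk URIs are currently attaching or detaching. A minimal sketch of that idea, assuming a simplified keyedLock type; the real lockMap defined elsewhere in this package may differ in detail:

    package main

    import (
        "fmt"
        "strings"
        "sync"
    )

    // keyedLock is a minimal stand-in for the package's lockMap: one mutex per
    // key, so attach/detach calls for different nodes do not serialize against
    // each other while calls for the same node do.
    type keyedLock struct {
        mu    sync.Mutex
        locks map[string]*sync.Mutex
    }

    func newKeyedLock() *keyedLock { return &keyedLock{locks: map[string]*sync.Mutex{}} }

    func (k *keyedLock) LockEntry(key string) {
        k.mu.Lock()
        m, ok := k.locks[key]
        if !ok {
            m = &sync.Mutex{}
            k.locks[key] = m
        }
        k.mu.Unlock()
        m.Lock()
    }

    func (k *keyedLock) UnlockEntry(key string) {
        k.mu.Lock()
        m := k.locks[key] // assumes LockEntry was called for this key first
        k.mu.Unlock()
        m.Unlock()
    }

    func main() {
        vmLocks := newKeyedLock()
        var diskStates sync.Map // disk URI -> "attaching" / "detaching", as in controllerCommon

        nodeName := "aks-nodepool1-0" // placeholder node name
        diskURI := "/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Compute/disks/pvc-disk"

        vmLocks.LockEntry(strings.ToLower(nodeName))
        diskStates.Store(strings.ToLower(diskURI), "attaching")
        // ... the VM update that actually attaches the disk would run here ...
        diskStates.Delete(strings.ToLower(diskURI))
        vmLocks.UnlockEntry(strings.ToLower(nodeName))

        fmt.Println("attach critical section released for", nodeName)
    }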
func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error { - vmset, err := c.getNodeVMSet(nodeName) + _, err := c.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - return err + if err == cloudprovider.InstanceNotFound { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached", + nodeName, diskURI) + return nil + } + klog.Warningf("failed to get azure instance id (%v)", err) + return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } - instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) + vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe) if err != nil { - klog.Warningf("failed to get azure instance id (%v)", err) - return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) + return err } klog.V(2).Infof("detach %v from node %q", diskURI, nodeName) // make the lock here as small as possible - diskOpMutex.LockKey(instanceid) + c.vmLockMap.LockEntry(strings.ToLower(string(nodeName))) + c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching") resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) - diskOpMutex.UnlockKey(instanceid) + c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) + c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) { - diskOpMutex.LockKey(instanceid) + c.vmLockMap.LockEntry(strings.ToLower(string(nodeName))) + c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching") resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) - diskOpMutex.UnlockKey(instanceid) + c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) + c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) return c.cloud.processHTTPRetryResponse(nil, "", resp, err) }) if retryErr != nil { @@ -191,18 +210,20 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N } // getNodeDataDisks invokes vmSet interfaces to get data disks for the node. -func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { - vmset, err := c.getNodeVMSet(nodeName) +func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) { + vmset, err := c.getNodeVMSet(nodeName, crt) if err != nil { return nil, err } - return vmset.GetDataDisks(nodeName) + return vmset.GetDataDisks(nodeName, crt) } // GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI. func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { - disks, err := c.getNodeDataDisks(nodeName) + // getNodeDataDisks need to fetch the cached data/fresh data if cache expired here + // to ensure we get LUN based on latest entry. 
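The comment above is the crux of the new cacheReadType parameter: LUN selection needs fresh (or at least TTL-valid) data, while read-only reconcile paths can accept possibly stale entries to avoid an ARM call. A simplified sketch of that behaviour, in the spirit of the timedCache.Get change at the start of this hunk series; readDefault, readUnsafe, and ttlCache are illustrative names rather than the package's own types:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type readType int

    const (
        readDefault readType = iota // refresh from the backend once the TTL has passed
        readUnsafe                  // accept possibly expired data to avoid an ARM round trip
    )

    // ttlCache is a minimal sketch of the timedCache behaviour introduced here:
    // per-key data with a creation timestamp, refreshed lazily via a getter.
    type ttlCache struct {
        mu     sync.Mutex
        ttl    time.Duration
        getter func(key string) (string, error)
        data   map[string]string
        stamp  map[string]time.Time
    }

    func (c *ttlCache) Get(key string, crt readType) (string, error) {
        c.mu.Lock()
        defer c.mu.Unlock()

        if v, ok := c.data[key]; ok {
            if crt == readUnsafe || time.Since(c.stamp[key]) < c.ttl {
                return v, nil // serve cached data; possibly stale for unsafe reads
            }
        }
        // Cache miss, or expired entry with a default read: hit the backend once.
        v, err := c.getter(key)
        if err != nil {
            return "", err
        }
        c.data[key], c.stamp[key] = v, time.Now()
        return v, nil
    }

    func main() {
        calls := 0
        c := &ttlCache{
            ttl:   time.Minute,
            data:  map[string]string{},
            stamp: map[string]time.Time{},
            getter: func(key string) (string, error) {
                calls++
                return "model-of-" + key, nil
            },
        }
        v, _ := c.Get("node-0", readDefault) // first read populates the cache
        v, _ = c.Get("node-0", readUnsafe)   // later unsafe reads never trigger the getter
        fmt.Println(v, "backend calls:", calls)
    }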
+ disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault) if err != nil { klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err @@ -212,9 +233,13 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) || (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || (disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { - // found the disk - klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) - return *disk.Lun, nil + if disk.ToBeDetached != nil && *disk.ToBeDetached { + klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %q uri %q", *disk.Lun, diskName, diskURI) + } else { + // found the disk + klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) + return *disk.Lun, nil + } } } return -1, fmt.Errorf("cannot find Lun for disk %s", diskName) @@ -222,7 +247,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N // GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used. func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { - disks, err := c.getNodeDataDisks(nodeName) + disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault) if err != nil { klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err @@ -249,7 +274,11 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N attached[diskName] = false } - disks, err := c.getNodeDataDisks(nodeName) + // doing stalled read for getNodeDataDisks to ensure we don't call ARM + // for every reconcile call. The cache is invalidated after Attach/Detach + // disk. So the new entry will be fetched and cached the first time reconcile + // loop runs after the Attach/Disk OP which will reflect the latest model. + disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeUnsafe) if err != nil { if err == cloudprovider.InstanceNotFound { // if host doesn't exist, no need to detach @@ -271,3 +300,17 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N return attached, nil } + +func filterDetachingDisks(unfilteredDisks []compute.DataDisk) []compute.DataDisk { + filteredDisks := []compute.DataDisk{} + for _, disk := range unfilteredDisks { + if disk.ToBeDetached != nil && *disk.ToBeDetached { + if disk.Name != nil { + klog.V(2).Infof("Filtering disk: %s with ToBeDetached flag set.", *disk.Name) + } + } else { + filteredDisks = append(filteredDisks, disk) + } + } + return filteredDisks +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go index 7490d93fcf4..93440be6aa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go @@ -30,8 +30,8 @@ import ( // AttachDisk attaches a vhd to vm // the vhd must exist, can be identified by diskName, diskURI, and lun. 
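filterDetachingDisks, added just above, is what keeps a disk that ARM has already marked ToBeDetached from being copied back into the next VM update. A trimmed-down usage example (logging omitted, local pointer helpers instead of autorest's to package):

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
    )

    func strPtr(s string) *string { return &s }
    func boolPtr(b bool) *bool    { return &b }

    // filterDetachingDisks is a simplified version of the helper added to
    // azure_controller_common.go: disks ARM marks as ToBeDetached are dropped
    // before the data-disk list is reused, so a racing detach is not re-added.
    func filterDetachingDisks(unfiltered []compute.DataDisk) []compute.DataDisk {
        filtered := []compute.DataDisk{}
        for _, disk := range unfiltered {
            if disk.ToBeDetached != nil && *disk.ToBeDetached {
                continue
            }
            filtered = append(filtered, disk)
        }
        return filtered
    }

    func main() {
        disks := []compute.DataDisk{
            {Name: strPtr("data-0")},
            {Name: strPtr("data-1"), ToBeDetached: boolPtr(true)}, // mid-detach, should be skipped
        }
        for _, d := range filterDetachingDisks(disks) {
            fmt.Println("kept:", *d.Name)
        }
    }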
-func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { - vm, err := as.getVirtualMachine(nodeName) +func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { + vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault) if err != nil { return err } @@ -42,19 +42,20 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri return err } - disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + disks := filterDetachingDisks(*vm.StorageProfile.DataDisks) if isManagedDisk { + managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} + if diskEncryptionSetID != "" { + managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID} + } disks = append(disks, compute.DataDisk{ Name: &diskName, Lun: &lun, Caching: cachingMode, CreateOption: "attach", - ManagedDisk: &compute.ManagedDiskParameters{ - ID: &diskURI, - }, + ManagedDisk: managedDisk, }) } else { disks = append(disks, @@ -77,7 +78,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri }, }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) with DiskEncryptionSetID(%s)", nodeResourceGroup, vmName, diskName, diskURI, diskEncryptionSetID) ctx, cancel := getContextWithCancel() defer cancel() @@ -102,7 +103,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri // DetachDisk detaches a disk from host // the vhd can be identified by diskName or diskURI func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { - vm, err := as.getVirtualMachine(nodeName) + vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault) if err != nil { // if host doesn't exist, no need to detach klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI) @@ -115,8 +116,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N return nil, err } - disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + disks := filterDetachingDisks(*vm.StorageProfile.DataDisks) bFoundDisk := false for i, disk := range disks { @@ -155,8 +155,8 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N } // GetDataDisks gets a list of data disks attached to the node. 
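The AttachDisk change above only sets a DiskEncryptionSet reference on the new data disk when the source disk carried a customer-managed diskEncryptionSetID. A small sketch of that construction; the URIs below are placeholders and the helper name is illustrative:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
    )

    // newManagedDataDisk builds the DataDisk entry the way the patched AttachDisk
    // does: the DiskEncryptionSet reference is only populated when a
    // customer-managed encryption set ID was found on the disk resource.
    func newManagedDataDisk(diskName, diskURI, diskEncryptionSetID string, lun int32, cachingMode compute.CachingTypes) compute.DataDisk {
        managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
        if diskEncryptionSetID != "" {
            managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
        }
        return compute.DataDisk{
            Name:         &diskName,
            Lun:          &lun,
            Caching:      cachingMode,
            CreateOption: "attach", // same literal the vendored code uses
            ManagedDisk:  managedDisk,
        }
    }

    func main() {
        // Placeholder URIs; real values come from the PV spec and the disk's Encryption property.
        d := newManagedDataDisk(
            "pvc-disk",
            "/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Compute/disks/pvc-disk",
            "/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/my-des",
            0, compute.CachingTypesReadOnly)
        fmt.Println(*d.ManagedDisk.DiskEncryptionSet.ID)
    }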
-func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { - vm, err := as.getVirtualMachine(nodeName) +func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) { + vm, err := as.getVirtualMachine(nodeName, crt) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index 5115d8043ec..2b237482f63 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -30,9 +30,9 @@ import ( // AttachDisk attaches a vhd to vm // the vhd must exist, can be identified by diskName, diskURI, and lun. -func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { +func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName) + ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault) if err != nil { return err } @@ -44,19 +44,20 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod disks := []compute.DataDisk{} if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + disks = filterDetachingDisks(*vm.StorageProfile.DataDisks) } if isManagedDisk { + managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} + if diskEncryptionSetID != "" { + managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID} + } disks = append(disks, compute.DataDisk{ Name: &diskName, Lun: &lun, Caching: compute.CachingTypes(cachingMode), CreateOption: "attach", - ManagedDisk: &compute.ManagedDiskParameters{ - ID: &diskURI, - }, + ManagedDisk: managedDisk, }) } else { disks = append(disks, @@ -86,10 +87,9 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod defer cancel() // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + defer ss.deleteCacheForNode(vmName) - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) with DiskEncryptionSetID(%s)", nodeResourceGroup, nodeName, diskName, diskURI, diskEncryptionSetID) _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk") if err != nil { detail := err.Error() @@ -108,7 +108,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod // the vhd can be identified by diskName or diskURI func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName) + ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault) if err != nil { return nil, err } @@ -120,8 +120,7 @@ func (ss *scaleSet) 
DetachDisk(diskName, diskURI string, nodeName types.NodeName disks := []compute.DataDisk{} if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + disks = filterDetachingDisks(*vm.StorageProfile.DataDisks) } bFoundDisk := false for i, disk := range disks { @@ -157,16 +156,15 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName defer cancel() // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + defer ss.deleteCacheForNode(vmName) klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk") } // GetDataDisks gets a list of data disks attached to the node. -func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { - _, _, vm, err := ss.getVmssVM(string(nodeName)) +func (ss *scaleSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) { + _, _, vm, err := ss.getVmssVM(string(nodeName), crt) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 659ffa522bf..8772cb7143c 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -549,7 +549,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resou return result, nil } -func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -566,15 +566,6 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resour } } -func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { - _, err = fVMC.Get(ctx, resourceGroupName, VMScaleSetName, instanceID) - if err != nil { - return result, err - } - - return result, nil -} - func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -951,7 +942,7 @@ func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { +func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { return 
fmt.Errorf("unimplemented") } @@ -959,7 +950,7 @@ func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName return nil, fmt.Errorf("unimplemented") } -func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { +func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) { return nil, fmt.Errorf("unimplemented") } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instance_metadata.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instance_metadata.go index 187e77f679f..8a3edb9e371 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instance_metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instance_metadata.go @@ -125,7 +125,7 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{} } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status) } @@ -144,8 +144,9 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{} } // GetMetadata gets instance metadata from cache. -func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) { - cache, err := ims.imsCache.Get(metadataCacheKey) +// crt determines if we can get data from stalled cache/need fresh if cache expired. +func (ims *InstanceMetadataService) GetMetadata(crt cacheReadType) (*InstanceMetadata, error) { + cache, err := ims.imsCache.Get(metadataCacheKey, crt) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go index ae37614dc82..418be9e3f47 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata() + metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) if err != nil { return nil, err } @@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata() + metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) if err != nil { return "", err } @@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata() + metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) if err != nil { return "", err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 8dfeb4f3a80..a7cf38fdf08 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -35,8 +35,6 @@ import ( cloudprovider "k8s.io/cloud-provider" servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog" - - utilfeature "k8s.io/apiserver/pkg/util/feature" utilnet "k8s.io/utils/net" ) @@ -104,18 +102,38 @@ const ( clusterNameKey = 
"kubernetes-cluster-name" ) -// GetLoadBalancer returns whether the specified load balancer exists, and +// GetLoadBalancer returns whether the specified load balancer and its components exist, and // if so, what its status is. func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { - _, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false) + // Since public IP is not a part of the load balancer on Azure, + // there is a chance that we could orphan public IP resources while we delete the load blanacer (kubernetes/kubernetes#80571). + // We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure. + existsPip := func() bool { + pipName, _, err := az.determinePublicIPName(clusterName, service) + if err != nil { + return false + } + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) + _, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName) + if err != nil { + return false + } + return existsPip + }() + + _, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false) if err != nil { - return nil, false, err + return nil, existsPip, err } - if !exists { + + // Return exists = false only if the load balancer and the public IP are not found on Azure + if !existsLb && !existsPip { serviceName := getServiceName(service) klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) return nil, false, nil } + + // Return exists = true if either the load balancer or the public IP (or both) exists return status, true, nil } @@ -169,6 +187,10 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser // UpdateLoadBalancer updates hosts under the specified load balancer. 
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { + if !az.shouldUpdateLoadBalancer(clusterName, service) { + klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name) + return nil + } _, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes) return err } @@ -475,7 +497,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s return service.Spec.LoadBalancerIP, nil } - lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service) + _, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false) if err != nil { return "", err } @@ -539,15 +561,19 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } - if utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { + if az.ipv6DualStackEnabled { // TODO: (khenidak) if we ever enable IPv6 single stack, then we should // not wrap the following in a feature gate ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP) if ipv6 { pip.PublicIPAddressVersion = network.IPv6 klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP) - // static allocation on IPv6 on Azure is not allowed + pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Dynamic + if az.useStandardLoadBalancer() { + // standard sku must have static allocation method for ipv6 + pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Static + } } else { pip.PublicIPAddressVersion = network.IPv4 klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP) @@ -669,7 +695,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service)) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) - lbBackendPoolName := getBackendPoolName(clusterName, service) + lbBackendPoolName := getBackendPoolName(az.ipv6DualStackEnabled, clusterName, service) lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName) lbIdleTimeout, err := getIdleTimeout(service) @@ -934,7 +960,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if isInternal { // Refresh updated lb which will be used later in other places. - newLB, exist, err := az.getAzureLoadBalancer(lbName) + newLB, exist, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault) if err != nil { klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) return nil, err @@ -1068,8 +1094,9 @@ func (az *Cloud) reconcileLoadBalancerRule( expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout } - // we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed - if protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP { + // we didn't construct the probe objects for UDP or SCTP because they're not allowed on Azure. + // However, when externalTrafficPolicy is Local, Kubernetes HTTP health check would be used for probing. 
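On the ensurePublicIPExists hunk above: for IPv6 front ends the allocation method is now chosen from the load balancer SKU instead of being hard-coded. A condensed sketch of that selection, using plain strings in place of the SDK enum values:

package main

import "fmt"

// ipv6AllocationMethod mirrors the selection above: basic SKU load balancers
// keep dynamic IPv6 allocation, while the standard SKU requires static allocation.
func ipv6AllocationMethod(useStandardLB bool) string {
	if useStandardLB {
		return "Static"
	}
	return "Dynamic"
}

func main() {
	fmt.Println("basic SKU:   ", ipv6AllocationMethod(false))
	fmt.Println("standard SKU:", ipv6AllocationMethod(true))
}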
+ if servicehelpers.NeedsHealthCheck(service) || (protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP) { expectedRule.Probe = &network.SubResource{ ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)), } @@ -1097,7 +1124,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, ports = []v1.ServicePort{} } - sg, err := az.getSecurityGroup() + sg, err := az.getSecurityGroup(cacheReadTypeDefault) if err != nil { return nil, err } @@ -1279,6 +1306,11 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return &sg, nil } +func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service) bool { + _, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nil, false) + return existsLb && service.ObjectMeta.DeletionTimestamp == nil +} + func logSafe(s *string) string { if s == nil { return "(nil)" @@ -1433,7 +1465,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa } if lbName != "" { - loadBalancer, _, err := az.getAzureLoadBalancer(lbName) + loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index ebbae1da8d0..3a48df20b50 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -40,6 +40,8 @@ const ( // default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd defaultDiskIOPSReadWrite = 500 defaultDiskMBpsReadWrite = 100 + + diskEncryptionSetIDFormat = "/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}" ) //ManagedDiskController : managed disk controller struct @@ -67,6 +69,8 @@ type ManagedDiskOptions struct { DiskIOPSReadWrite string // Throughput Cap (MBps) for UltraSSD disk DiskMBpsReadWrite string + // ResourceId of the disk encryption set to use for enabling encryption at rest. 
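Tying the disk pieces together: the AttachDisk hunks earlier in this patch and the new ManagedDiskOptions.DiskEncryptionSetID field both thread an optional disk encryption set resource ID into the SDK's managed-disk parameters. A self-contained sketch of that wiring, using the vendored SDK types referenced above; the helper itself and the sample URI are illustrative.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
)

// buildDataDisk shows how an optional diskEncryptionSetID flows into the
// managed-disk parameters of a data disk to be attached; an empty ID leaves
// DiskEncryptionSet unset, matching the behaviour in the patch.
func buildDataDisk(diskName, diskURI string, lun int32, diskEncryptionSetID string) compute.DataDisk {
	managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
	if diskEncryptionSetID != "" {
		managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
	}
	return compute.DataDisk{
		Name:         &diskName,
		Lun:          &lun,
		CreateOption: "attach",
		ManagedDisk:  managedDisk,
	}
}

func main() {
	disk := buildDataDisk("pvc-disk", "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Compute/disks/pvc-disk", 0, "")
	fmt.Println(*disk.Name, "encrypted with customer key:", disk.ManagedDisk.DiskEncryptionSet != nil)
}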
+ DiskEncryptionSetID string } //CreateManagedDisk : create managed disk @@ -129,6 +133,16 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( } } + if options.DiskEncryptionSetID != "" { + if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 { + return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, diskEncryptionSetIDFormat) + } + diskProperties.Encryption = &compute.Encryption{ + DiskEncryptionSetID: &options.DiskEncryptionSetID, + Type: compute.EncryptionAtRestWithCustomerKey, + } + } + model := compute.Disk{ Location: &c.common.location, Tags: newTags, @@ -187,6 +201,10 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { ctx, cancel := getContextWithCancel() defer cancel() + if _, ok := c.common.diskAttachDetachMap.Load(strings.ToLower(diskURI)); ok { + return fmt.Errorf("failed to delete disk(%s) since it's in attaching or detaching state", diskURI) + } + _, err = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName) if err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_metrics.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_metrics.go index 3ba202756d5..999a4a2b639 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_metrics.go @@ -27,8 +27,9 @@ import ( ) type apiCallMetrics struct { - latency *metrics.HistogramVec - errors *metrics.CounterVec + latency *metrics.HistogramVec + errors *metrics.CounterVec + rateLimitedCount *metrics.CounterVec } var ( @@ -53,6 +54,9 @@ func newMetricContext(prefix, request, resourceGroup, subscriptionID, source str attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source}, } } +func (mc *metricContext) RateLimitedCount() { + apiMetrics.rateLimitedCount.WithLabelValues(mc.attributes...).Inc() +} func (mc *metricContext) Observe(err error) error { apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( @@ -82,10 +86,19 @@ func registerAPIMetrics(attributes ...string) *apiCallMetrics { }, attributes, ), + rateLimitedCount: metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "cloudprovider_azure_api_request_ratelimited_count", + Help: "Number of rate limited Azure API calls", + StabilityLevel: metrics.ALPHA, + }, + attributes, + ), } legacyregistry.MustRegister(metrics.latency) legacyregistry.MustRegister(metrics.errors) + legacyregistry.MustRegister(metrics.rateLimitedCount) return metrics } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go index 3cc689fa597..27d807ed47d 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go @@ -30,10 +30,6 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" utilnet "k8s.io/utils/net" - - // Azure route controller changes behavior if ipv6dual stack feature is turned on - // remove this once the feature graduates - utilfeature "k8s.io/apiserver/pkg/util/feature" ) // copied to minimize the number of cross reference @@ -46,8 +42,8 @@ const ( // ListRoutes lists all managed routes that belong to the specified clusterName func (az *Cloud) ListRoutes(ctx 
context.Context, clusterName string) ([]*cloudprovider.Route, error) { klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) - routeTable, existsRouteTable, err := az.getRouteTable() - routes, err := processRoutes(routeTable, existsRouteTable, err) + routeTable, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault) + routes, err := processRoutes(az.ipv6DualStackEnabled, routeTable, existsRouteTable, err) if err != nil { return nil, err } @@ -63,7 +59,7 @@ func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr if cidr, ok := az.routeCIDRs[nodeName]; ok { routes = append(routes, &cloudprovider.Route{ Name: nodeName, - TargetNode: mapRouteNameToNodeName(nodeName), + TargetNode: mapRouteNameToNodeName(az.ipv6DualStackEnabled, nodeName), DestinationCIDR: cidr, }) } @@ -73,7 +69,7 @@ func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr } // Injectable for testing -func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) { +func processRoutes(ipv6DualStackEnabled bool, routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) { if err != nil { return nil, err } @@ -85,7 +81,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil { kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes)) for i, route := range *routeTable.Routes { - instance := mapRouteNameToNodeName(*route.Name) + instance := mapRouteNameToNodeName(ipv6DualStackEnabled, *route.Name) cidr := *route.AddressPrefix klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) @@ -102,7 +98,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl } func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error { - if _, existsRouteTable, err := az.getRouteTable(); err != nil { + if _, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault); err != nil { klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return err } else if existsRouteTable { @@ -141,7 +137,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } if unmanaged { - if utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { + if az.ipv6DualStackEnabled { //TODO (khenidak) add support for unmanaged nodes when the feature reaches beta return fmt.Errorf("unmanaged nodes are not supported in dual stack mode") } @@ -156,7 +152,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil { return err } - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { + if !az.ipv6DualStackEnabled { targetIP, _, err = az.getIPForMachine(kubeRoute.TargetNode) if err != nil { return err @@ -178,7 +174,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } } - routeName := mapNodeNameToRouteName(kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) + routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) route := network.Route{ Name: to.StringPtr(routeName), RoutePropertiesFormat: &network.RoutePropertiesFormat{ @@ -217,7 +213,7 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) - routeName := mapNodeNameToRouteName(kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) + routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) err = az.DeleteRouteWithName(routeName) if err != nil { return err @@ -231,16 +227,16 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute // These two functions enable stashing the instance name in the route // and then retrieving it later when listing. This is needed because // Azure does not let you put tags/descriptions on the Route itself. -func mapNodeNameToRouteName(nodeName types.NodeName, cidr string) string { - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { +func mapNodeNameToRouteName(ipv6DualStackEnabled bool, nodeName types.NodeName, cidr string) string { + if !ipv6DualStackEnabled { return fmt.Sprintf("%s", nodeName) } return fmt.Sprintf(routeNameFmt, nodeName, cidrtoRfc1035(cidr)) } // Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName. 
-func mapRouteNameToNodeName(routeName string) types.NodeName { - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { +func mapRouteNameToNodeName(ipv6DualStackEnabled bool, routeName string) types.NodeName { + if !ipv6DualStackEnabled { return types.NodeName(fmt.Sprintf("%s", routeName)) } parts := strings.Split(routeName, routeNameSeparator) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go index 444584edf37..22224db9c3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -40,7 +40,6 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" utilnet "k8s.io/utils/net" ) @@ -263,8 +262,8 @@ func isInternalLoadBalancer(lb *network.LoadBalancer) bool { // clusters moving from IPv4 to duakstack will require no changes // clusters moving from IPv6 (while not seen in the wild, we can not rule out their existence) // to dualstack will require deleting backend pools (the reconciler will take care of creating correct backendpools) -func getBackendPoolName(clusterName string, service *v1.Service) string { - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { +func getBackendPoolName(ipv6DualStackEnabled bool, clusterName string, service *v1.Service) string { + if !ipv6DualStackEnabled { return clusterName } IPv6 := utilnet.IsIPv6String(service.Spec.ClusterIP) @@ -375,14 +374,14 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) var machine compute.VirtualMachine var err error - machine, err = as.getVirtualMachine(types.NodeName(name)) + machine, err = as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe) if err == cloudprovider.InstanceNotFound { return "", cloudprovider.InstanceNotFound } if err != nil { if as.CloudProviderBackoff { klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) - machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) + machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), cacheReadTypeUnsafe) if err != nil { klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) return "", err @@ -403,7 +402,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) // GetPowerStatusByNodeName returns the power state of the specified node. func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) { - vm, err := as.getVirtualMachine(types.NodeName(name)) + vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeDefault) if err != nil { return powerState, err } @@ -436,7 +435,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod // GetInstanceTypeByNodeName gets the instance type by node name. func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { - machine, err := as.getVirtualMachine(types.NodeName(name)) + machine, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe) if err != nil { klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) return "", err @@ -448,7 +447,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error // GetZoneByNodeName gets availability zone for the specified node. 
If the node is not running // with availability zone, then it returns fault domain. func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - vm, err := as.getVirtualMachine(types.NodeName(name)) + vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe) if err != nil { return cloudprovider.Zone{}, err } @@ -649,7 +648,7 @@ func extractResourceGroupByNicID(nicID string) (string, error) { func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) { var machine compute.VirtualMachine - machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) + machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cacheReadTypeDefault) if err != nil { klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) return network.Interface{}, err @@ -721,7 +720,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types. } var primaryIPConfig *network.InterfaceIPConfiguration - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { + if !as.Cloud.ipv6DualStackEnabled { primaryIPConfig, err = getPrimaryIPConfig(nic) if err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go new file mode 100644 index 00000000000..d4b5aba8015 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go @@ -0,0 +1,71 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "sync" +) + +// lockMap used to lock on entries +type lockMap struct { + sync.Mutex + mutexMap map[string]*sync.Mutex +} + +// NewLockMap returns a new lock map +func newLockMap() *lockMap { + return &lockMap{ + mutexMap: make(map[string]*sync.Mutex), + } +} + +// LockEntry acquires a lock associated with the specific entry +func (lm *lockMap) LockEntry(entry string) { + lm.Lock() + // check if entry does not exists, then add entry + if _, exists := lm.mutexMap[entry]; !exists { + lm.addEntry(entry) + } + + lm.Unlock() + lm.lockEntry(entry) +} + +// UnlockEntry release the lock associated with the specific entry +func (lm *lockMap) UnlockEntry(entry string) { + lm.Lock() + defer lm.Unlock() + + if _, exists := lm.mutexMap[entry]; !exists { + return + } + lm.unlockEntry(entry) +} + +func (lm *lockMap) addEntry(entry string) { + lm.mutexMap[entry] = &sync.Mutex{} +} + +func (lm *lockMap) lockEntry(entry string) { + lm.mutexMap[entry].Lock() +} + +func (lm *lockMap) unlockEntry(entry string) { + lm.mutexMap[entry].Unlock() +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go index 26b6e1292c8..c86ef86d6c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go @@ -66,11 +66,11 @@ type VMSet interface { EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. - AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) // GetDataDisks gets a list of data disks attached to the node. - GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) + GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) // GetPowerStatusByNodeName returns the power state of the specified node. GetPowerStatusByNodeName(name string) (string, error) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 040de1d88b0..379977ac2b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -25,6 +25,7 @@ import ( "sort" "strconv" "strings" + "sync" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" @@ -36,7 +37,6 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" - utilfeature "k8s.io/apiserver/pkg/util/feature" utilnet "k8s.io/utils/net" ) @@ -60,10 +60,8 @@ type scaleSet struct { // (e.g. master nodes) may not belong to any scale sets. 
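A usage note for the lockMap added in azure_utils.go above: it hands out one mutex per key, so operations on the same disk are serialized while unrelated disks proceed in parallel. Since the type is unexported, the sketch below re-implements the same idea inline rather than importing it; the disk URI is made up.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		guard   sync.Mutex                     // protects the map itself
		entries = make(map[string]*sync.Mutex) // one mutex per entry key
	)
	lockFor := func(key string) *sync.Mutex {
		guard.Lock()
		defer guard.Unlock()
		if _, ok := entries[key]; !ok {
			entries[key] = &sync.Mutex{}
		}
		return entries[key]
	}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			mu := lockFor("diskURI-A") // same key: these three goroutines run one at a time
			mu.Lock()
			defer mu.Unlock()
			fmt.Println("attach/detach step", i, "on diskURI-A")
		}(i)
	}
	wg.Wait()
}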
availabilitySet VMSet - vmssCache *timedCache - vmssVMCache *timedCache - nodeNameToScaleSetMappingCache *timedCache - availabilitySetNodesCache *timedCache + vmssVMCache *timedCache + availabilitySetNodesCache *timedCache } // newScaleSet creates a new scaleSet. @@ -74,22 +72,12 @@ func newScaleSet(az *Cloud) (VMSet, error) { availabilitySet: newAvailabilitySet(az), } - ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache() - if err != nil { - return nil, err - } - ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache() if err != nil { return nil, err } - ss.vmssCache, err = ss.newVmssCache() - if err != nil { - return nil, err - } - - ss.vmssVMCache, err = ss.newVmssVMCache() + ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache() if err != nil { return nil, err } @@ -99,44 +87,51 @@ func newScaleSet(az *Cloud) (VMSet, error) { // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. -func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) { - instanceID, err = getScaleSetVMInstanceID(nodeName) - if err != nil { - return ssName, instanceID, vm, err - } +func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { + getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + if err != nil { + return "", "", nil, err + } - ssName, err = ss.getScaleSetNameByNodeName(nodeName) - if err != nil { - return ssName, instanceID, vm, err - } + virtualMachines := cached.(*sync.Map) + if vm, ok := virtualMachines.Load(nodeName); ok { + result := vm.(*vmssVirtualMachinesEntry) + return result.vmssName, result.instanceID, result.virtualMachine, nil + } - if ssName == "" { - return "", "", vm, cloudprovider.InstanceNotFound + return "", "", nil, nil } - resourceGroup, err := ss.GetNodeResourceGroup(nodeName) + _, err := getScaleSetVMInstanceID(nodeName) if err != nil { - return "", "", vm, err + return "", "", nil, err } - klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName) - key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID)) - cachedVM, err := ss.vmssVMCache.Get(key) + vmssName, instanceID, vm, err := getter(nodeName) if err != nil { - return ssName, instanceID, vm, err + return "", "", nil, err + } + if vm != nil { + return vmssName, instanceID, vm, nil } - if cachedVM == nil { - klog.Errorf("Can't find node (%q) in any scale sets", nodeName) - return ssName, instanceID, vm, cloudprovider.InstanceNotFound + klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) + ss.vmssVMCache.Delete(vmssVirtualMachinesKey) + vmssName, instanceID, vm, err = getter(nodeName) + if err != nil { + return "", "", nil, err } - return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil + if vm == nil { + return "", "", nil, cloudprovider.InstanceNotFound + } + return vmssName, instanceID, vm, nil } // GetPowerStatusByNodeName returns the power state of the specified node. 
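The getVmssVM rewrite above switches to a single shared map of VMSS VMs and follows a look-up, invalidate-on-miss, look-up-again pattern. A stripped-down sketch of that pattern with stand-in types; this is not the provider's timedCache API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errInstanceNotFound = errors.New("instance not found")

type vmEntry struct{ vmssName, instanceID string }

// lookupWithRefresh tries the cached map first and, on a miss, invalidates the
// cache so the next read repopulates it, then tries exactly once more.
func lookupWithRefresh(cache func() *sync.Map, invalidate func(), node string) (vmEntry, error) {
	get := func() (vmEntry, bool) {
		v, ok := cache().Load(node)
		if !ok {
			return vmEntry{}, false
		}
		return v.(vmEntry), true
	}
	if e, ok := get(); ok {
		return e, nil
	}
	invalidate() // node may be new: drop the stale map and repopulate
	if e, ok := get(); ok {
		return e, nil
	}
	return vmEntry{}, errInstanceNotFound
}

func main() {
	m := &sync.Map{}
	m.Store("node-1", vmEntry{"vmss-agents", "3"})
	e, err := lookupWithRefresh(func() *sync.Map { return m }, func() {}, "node-1")
	fmt.Println(e, err)
}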
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { - _, _, vm, err := ss.getVmssVM(name) + _, _, vm, err := ss.getVmssVM(name, cacheReadTypeDefault) if err != nil { return powerState, err } @@ -158,27 +153,56 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. -func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) { - vmName := ss.makeVmssVMName(scaleSetName, instanceID) - key := buildVmssCacheKey(resourceGroup, vmName) - cachedVM, err := ss.vmssVMCache.Get(key) +func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) { + getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + if err != nil { + return nil, false, err + } + + virtualMachines := cached.(*sync.Map) + virtualMachines.Range(func(key, value interface{}) bool { + vmEntry := value.(*vmssVirtualMachinesEntry) + if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) && + strings.EqualFold(vmEntry.vmssName, scaleSetName) && + strings.EqualFold(vmEntry.instanceID, instanceID) { + vm = vmEntry.virtualMachine + found = true + return false + } + + return true + }) + + return vm, found, nil + } + + vm, found, err := getter() if err != nil { - return vm, err + return nil, err + } + if found { + return vm, nil } - if cachedVM == nil { - klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID) - return vm, cloudprovider.InstanceNotFound + klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID) + ss.vmssVMCache.Delete(vmssVirtualMachinesKey) + vm, found, err = getter() + if err != nil { + return nil, err + } + if !found { + return nil, cloudprovider.InstanceNotFound } - return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil + return vm, nil } // GetInstanceIDByNodeName gets the cloud provider ID by node name. // It must return ("", cloudprovider.InstanceNotFound) if the instance does // not exist or is no longer running. func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { - managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err @@ -188,7 +212,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { return ss.availabilitySet.GetInstanceIDByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name) + _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe) if err != nil { return "", err } @@ -222,7 +246,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, return ss.availabilitySet.GetNodeNameByProviderID(providerID) } - vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID) + vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe) if err != nil { return "", err } @@ -237,7 +261,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, // GetInstanceTypeByNodeName gets the instance type by node name. 
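One small Go detail in getVmssVMByInstanceID above: sync.Map.Range stops as soon as the callback returns false, which is how the scan short-circuits on the first entry whose resource group, scale set and instance ID match (case-insensitively). In isolation:

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	vms := &sync.Map{}
	vms.Store("node-a", "VMSS-Agents_0")
	vms.Store("node-b", "vmss-agents_1")

	var match string
	vms.Range(func(key, value interface{}) bool {
		if strings.EqualFold(value.(string), "vmss-agents_1") {
			match = key.(string)
			return false // found it: stop iterating
		}
		return true // keep scanning
	})
	fmt.Println("matched node:", match)
}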
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { - managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err @@ -247,7 +271,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { return ss.availabilitySet.GetInstanceTypeByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name) + _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe) if err != nil { return "", err } @@ -262,7 +286,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { // GetZoneByNodeName gets availability zone for the specified node. If the node is not running // with availability zone, then it returns fault domain. func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return cloudprovider.Zone{}, err @@ -272,7 +296,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return ss.availabilitySet.GetZoneByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name) + _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe) if err != nil { return cloudprovider.Zone{}, err } @@ -290,6 +314,11 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { } else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil { // Availability zone is not used for the node, falling back to fault domain. failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)) + } else { + err = fmt.Errorf("failed to get zone info") + klog.Errorf("GetZoneByNodeName: got unexpected error %v", err) + ss.deleteCacheForNode(name) + return cloudprovider.Zone{}, err } return cloudprovider.Zone{ @@ -463,9 +492,15 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { return nil, err } - ssNames := make([]string, len(allScaleSets)) - for i := range allScaleSets { - ssNames[i] = *(allScaleSets[i].Name) + ssNames := make([]string, 0) + for _, vmss := range allScaleSets { + name := *vmss.Name + if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 { + klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name) + continue + } + + ssNames = append(ssNames, name) } return ssNames, nil @@ -500,7 +535,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { } nodeName := nodes[nx].Name - ssName, err := ss.getScaleSetNameByNodeName(nodeName) + ssName, _, _, err := ss.getVmssVM(nodeName, cacheReadTypeDefault) if err != nil { return nil, err } @@ -578,7 +613,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) { // GetPrimaryInterface gets machine primary network interface by node name and vmSet. 
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) { - managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cacheReadTypeDefault) if err != nil { klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return network.Interface{}, err @@ -588,7 +623,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return ss.availabilitySet.GetPrimaryInterface(nodeName) } - ssName, instanceID, vm, err := ss.getVmssVM(nodeName) + ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault) if err != nil { // VM is availability set, but not cached yet in availabilitySetNodesCache. if err == ErrorNotVmssInstance { @@ -599,7 +634,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return network.Interface{}, err } - primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) + primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm) if err != nil { klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) return network.Interface{}, err @@ -711,7 +746,7 @@ func (ss *scaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID) vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName) + ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault) if err != nil { return err } @@ -739,7 +774,7 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam var primaryIPConfiguration *compute.VirtualMachineScaleSetIPConfiguration // Find primary network interface configuration. - if !utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) { + if !ss.Cloud.ipv6DualStackEnabled { // Find primary IP configuration. primaryIPConfiguration, err = getPrimaryIPConfigFromVMSSNetworkConfig(primaryNetworkInterfaceConfiguration) if err != nil { @@ -815,9 +850,8 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam return err } - // Invalidate the cache since we would update it. - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + // Invalidate the cache since right after update + defer ss.deleteCacheForNode(vmName) // Update vmssVM with backoff. ctx, cancel := getContextWithCancel() @@ -990,7 +1024,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac f := func() error { // Check whether the node is VMAS virtual machine. - managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName) + managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cacheReadTypeDefault) if err != nil { klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err) return err @@ -1031,7 +1065,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node. 
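Both EnsureHostInPool above and the attach/detach paths earlier in the patch now defer deleteCacheForNode before issuing the VMSS VM update, so the cached entry is dropped whether the update succeeds or fails. The idiom in isolation; the types and names here are illustrative.

package main

import (
	"errors"
	"fmt"
)

type nodeCache struct{ entries map[string]string }

func (c *nodeCache) deleteCacheForNode(node string) {
	delete(c.entries, node)
	fmt.Println("cache invalidated for", node)
}

// updateNode defers the invalidation up front: even if the update call fails,
// no stale entry survives to mislead the next reconcile loop.
func updateNode(c *nodeCache, node string, update func() error) error {
	defer c.deleteCacheForNode(node)
	return update()
}

func main() {
	c := &nodeCache{entries: map[string]string{"node-1": "old view"}}
	err := updateNode(c, "node-1", func() error { return errors.New("transient ARM error") })
	fmt.Println("update error:", err, "remaining entries:", c.entries)
}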
func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error { - ssName, instanceID, vm, err := ss.getVmssVM(nodeName) + ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault) if err != nil { return err } @@ -1093,9 +1127,8 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa return err } - // Invalidate the cache since we would update it. - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + // Invalidate the cache since right after update + defer ss.deleteCacheForNode(nodeName) // Update vmssVM with backoff. ctx, cancel := getContextWithCancel() @@ -1129,7 +1162,7 @@ func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (st resourceGroup := matches[1] scaleSetName := matches[2] instanceID := matches[3] - vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID) + vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe) if err != nil { return "", err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index a7ef5a5ff19..614b16b93e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -21,8 +21,12 @@ package azure import ( "fmt" "strings" + "sync" "time" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest/to" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" ) @@ -31,18 +35,20 @@ var ( vmssNameSeparator = "_" vmssCacheSeparator = "#" - nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey" - availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" + vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey" + availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" - vmssCacheTTL = time.Minute - vmssVMCacheTTL = time.Minute - availabilitySetNodesCacheTTL = 5 * time.Minute - nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute + availabilitySetNodesCacheTTL = 15 * time.Minute + vmssVirtualMachinesTTL = 10 * time.Minute ) -// nodeNameToScaleSetMapping maps nodeName to scaleSet name. -// The map is required because vmss nodeName is not equal to its vmName. -type nodeNameToScaleSetMapping map[string]string +type vmssVirtualMachinesEntry struct { + resourceGroup string + vmssName string + instanceID string + virtualMachine *compute.VirtualMachineScaleSetVM + lastUpdate time.Time +} func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string { return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID) @@ -62,32 +68,9 @@ func extractVmssVMName(name string) (string, string, error) { return ssName, instanceID, nil } -// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups -// will be excluded from LB backends. 
-func (ss *scaleSet) newVmssCache() (*timedCache, error) { +func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key) - exists, message, realErr := checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - - if !exists { - klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) - return nil, nil - } - - return &result, nil - } - - return newTimedcache(vmssCacheTTL, getter) -} - -func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { - getter := func(key string) (interface{}, error) { - localCache := make(nodeNameToScaleSetMapping) + localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry allResourceGroups, err := ss.GetResourceGroups() if err != nil { @@ -106,14 +89,21 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { return nil, err } - for _, vm := range vms { + for i := range vms { + vm := vms[i] if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) continue } computerName := strings.ToLower(*vm.OsProfile.ComputerName) - localCache[computerName] = ssName + localCache.Store(computerName, &vmssVirtualMachinesEntry{ + resourceGroup: resourceGroup, + vmssName: ssName, + instanceID: to.String(vm.InstanceID), + virtualMachine: &vm, + lastUpdate: time.Now().UTC(), + }) } } } @@ -121,7 +111,19 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { return localCache, nil } - return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter) + return newTimedcache(vmssVirtualMachinesTTL, getter) +} + +func (ss *scaleSet) deleteCacheForNode(nodeName string) error { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe) + if err != nil { + klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) + return err + } + + virtualMachines := cached.(*sync.Map) + virtualMachines.Delete(nodeName) + return nil } func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) { @@ -151,111 +153,8 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) { return newTimedcache(availabilitySetNodesCacheTTL, getter) } -func buildVmssCacheKey(resourceGroup, name string) string { - // key is composed of # - return fmt.Sprintf("%s%s%s", strings.ToLower(resourceGroup), vmssCacheSeparator, name) -} - -func extractVmssCacheKey(key string) (string, string, error) { - // key is composed of # - keyItems := strings.Split(key, vmssCacheSeparator) - if len(keyItems) != 2 { - return "", "", fmt.Errorf("key %q is not in format '#'", key) - } - - resourceGroup := keyItems[0] - vmName := keyItems[1] - return resourceGroup, vmName, nil -} - -func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { - getter := func(key string) (interface{}, error) { - // key is composed of # - resourceGroup, vmName, err := extractVmssCacheKey(key) - if err != nil { - return nil, err - } - - // vmName's format is 'scaleSetName_instanceID' - ssName, instanceID, err := extractVmssVMName(vmName) - if err != nil { - return nil, err - } - - // Not found, the VM doesn't belong to any known scale sets. 
- if ssName == "" { - return nil, nil - } - - ctx, cancel := getContextWithCancel() - defer cancel() - result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID) - exists, message, realErr := checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - - if !exists { - klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) - return nil, nil - } - - // Get instanceView for vmssVM. - if result.InstanceView == nil { - viewCtx, viewCancel := getContextWithCancel() - defer viewCancel() - view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID) - // It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again. - exists, message, realErr = checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - if !exists { - klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) - return nil, nil - } - - result.InstanceView = &view - } - - return &result, nil - } - - return newTimedcache(vmssVMCacheTTL, getter) -} - -func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) { - getScaleSetName := func(nodeName string) (string, error) { - nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey) - if err != nil { - return "", err - } - - realMapping := nodeNameMapping.(nodeNameToScaleSetMapping) - if ssName, ok := realMapping[nodeName]; ok { - return ssName, nil - } - - return "", nil - } - - ssName, err := getScaleSetName(nodeName) - if err != nil { - return "", err - } - - if ssName != "" { - return ssName, nil - } - - // ssName is still not found, it is likely that new Nodes are created. - // Force refresh the cache and try again. - ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey) - return getScaleSetName(nodeName) -} - -func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) { - cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey) +func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt cacheReadType) (bool, error) { + cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt) if err != nil { return false, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go index 5da34a1f407..cc2851ae783 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go @@ -90,9 +90,9 @@ func ignoreStatusForbiddenFromError(err error) error { /// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache /// The service side has throttling control that delays responses if there're multiple requests onto certain vm /// resource request in short period. 
-func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) { +func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt cacheReadType) (vm compute.VirtualMachine, err error) { vmName := string(nodeName) - cachedVM, err := az.vmCache.Get(vmName) + cachedVM, err := az.vmCache.Get(vmName, crt) if err != nil { return vm, err } @@ -104,8 +104,8 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM return *(cachedVM.(*compute.VirtualMachine)), nil } -func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { - cachedRt, err := az.rtCache.Get(az.RouteTableName) +func (az *Cloud) getRouteTable(crt cacheReadType) (routeTable network.RouteTable, exists bool, err error) { + cachedRt, err := az.rtCache.Get(az.RouteTableName, crt) if err != nil { return routeTable, false, err } @@ -168,8 +168,8 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet return subnet, exists, err } -func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) { - cachedLB, err := az.lbCache.Get(name) +func (az *Cloud) getAzureLoadBalancer(name string, crt cacheReadType) (lb network.LoadBalancer, exists bool, err error) { + cachedLB, err := az.lbCache.Get(name, crt) if err != nil { return lb, false, err } @@ -181,12 +181,12 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi return *(cachedLB.(*network.LoadBalancer)), true, nil } -func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) { +func (az *Cloud) getSecurityGroup(crt cacheReadType) (nsg network.SecurityGroup, err error) { if az.SecurityGroupName == "" { return nsg, fmt.Errorf("securityGroupName is not configured") } - securityGroup, err := az.nsgCache.Get(az.SecurityGroupName) + securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt) if err != nil { return nsg, err } @@ -350,7 +350,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } newLBName := matches[1] - newLBNameTrimmed := strings.TrimRight(newLBName, InternalLoadBalancerNameSuffix) + newLBNameTrimmed := strings.TrimSuffix(newLBName, InternalLoadBalancerNameSuffix) for _, backendPool := range existingBackendPools { matches := backendPoolIDRE.FindStringSubmatch(backendPool) if len(matches) != 2 { @@ -358,7 +358,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } lbName := matches[1] - if !strings.EqualFold(strings.TrimRight(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) { + if !strings.EqualFold(strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) { return false, lbName, nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go index e5c3c889058..43ca994750d 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go @@ -53,7 +53,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string { // If the node is not running with availability zones, then it will fall back to fault domain. 
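Separately, the isBackendPoolOnSameLB hunk above replaces strings.TrimRight with strings.TrimSuffix. The distinction matters: TrimRight treats its second argument as a set of characters and keeps stripping them, so it can eat into the load balancer name itself, while TrimSuffix removes the literal suffix at most once. A quick demonstration with an example suffix:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const suffix = "-internal"
	name := "mycluster-internal"

	// TrimRight strips any trailing run of the characters in suffix,
	// so after removing "-internal" it keeps going and also drops "ter".
	fmt.Println(strings.TrimRight(name, suffix)) // myclus

	// TrimSuffix removes the literal suffix exactly once, which is what
	// the backend pool comparison actually needs.
	fmt.Println(strings.TrimSuffix(name, suffix)) // mycluster
}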
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata() + metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) if err != nil { return cloudprovider.Zone{}, err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go index 1de777909c0..70f46dc3a40 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go @@ -607,11 +607,9 @@ func (g *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, g.clientBuilder = clientBuilder g.client = clientBuilder.ClientOrDie("cloud-provider") - if g.OnXPN() { - g.eventBroadcaster = record.NewBroadcaster() - g.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: g.client.CoreV1().Events("")}) - g.eventRecorder = g.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "g-cloudprovider"}) - } + g.eventBroadcaster = record.NewBroadcaster() + g.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: g.client.CoreV1().Events("")}) + g.eventRecorder = g.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "g-cloudprovider"}) go g.watchClusterID(stop) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go index 70b84d35b11..8cccd66a2ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go @@ -27,6 +27,9 @@ const ( // AlphaFeatureILBSubsets allows InternalLoadBalancer services to include a subset // of cluster nodes as backends instead of all nodes. AlphaFeatureILBSubsets = "ILBSubsets" + // AlphaFeatureILBCustomSubnet allows InternalLoadBalancer services to specify a + // network subnet to allocate ip addresses from. + AlphaFeatureILBCustomSubnet = "ILBCustomSubnet" ) // AlphaFeatureGate contains a mapping of alpha features to whether they are enabled diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go index c2a3602f714..2280f26d917 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go @@ -58,6 +58,11 @@ const ( // created in. ServiceAnnotationILBAllowGlobalAccess = "networking.gke.io/internal-load-balancer-allow-global-access" + // ServiceAnnotationILBSubnet is annotated on a service with the name of the subnetwork + // the ILB IP Address should be assigned from. By default, this is the subnetwork that the + // cluster is created in. + ServiceAnnotationILBSubnet = "networking.gke.io/internal-load-balancer-subnet" + // NetworkTierAnnotationKey is annotated on a Service object to indicate which // network tier a GCP LB should use. The valid values are "Standard" and // "Premium" (default). 
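For the new internal-load-balancer subnet annotation introduced above, a small usage sketch: the accessor returns the annotation value when present and an empty string otherwise, which callers treat as "use the cluster's default subnetwork". The subnet name is made up, and a plain map stands in for the Service's annotation metadata.

package main

import "fmt"

const serviceAnnotationILBSubnet = "networking.gke.io/internal-load-balancer-subnet"

// ilbSubnet mirrors the accessor the patch adds for this annotation:
// a missing annotation means "fall back to the cluster's default subnetwork".
func ilbSubnet(annotations map[string]string) string {
	if v, ok := annotations[serviceAnnotationILBSubnet]; ok {
		return v
	}
	return ""
}

func main() {
	svc := map[string]string{serviceAnnotationILBSubnet: "ilb-subnet-us-central1"}
	fmt.Printf("custom subnet: %q\n", ilbSubnet(svc))
	fmt.Printf("default:       %q\n", ilbSubnet(map[string]string{}))
}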
@@ -132,6 +137,8 @@ func GetServiceNetworkTier(service *v1.Service) (cloud.NetworkTier, error) { type ILBOptions struct { // AllowGlobalAccess Indicates whether global access is allowed for the LoadBalancer AllowGlobalAccess bool + // SubnetName indicates which subnet the LoadBalancer VIPs should be assigned from + SubnetName string } // GetLoadBalancerAnnotationAllowGlobalAccess returns if global access is enabled @@ -139,3 +146,11 @@ type ILBOptions struct { func GetLoadBalancerAnnotationAllowGlobalAccess(service *v1.Service) bool { return service.Annotations[ServiceAnnotationILBAllowGlobalAccess] == "true" } + +// GetLoadBalancerAnnotationSubnet returns the configured subnet to assign LoadBalancer IP from. +func GetLoadBalancerAnnotationSubnet(service *v1.Service) string { + if val, exists := service.Annotations[ServiceAnnotationILBSubnet]; exists { + return val + } + return "" +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go index 35e871d7e5f..61cdcb5eca7 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go @@ -40,18 +40,25 @@ type cidrs struct { } var ( - lbSrcRngsFlag cidrs + l4LbSrcRngsFlag cidrs + l7lbSrcRngsFlag cidrs ) func init() { var err error - // LB L7 proxies and all L3/4/7 health checkers have client addresses within these known CIDRs. - lbSrcRngsFlag.ipn, err = utilnet.ParseIPNets([]string{"130.211.0.0/22", "35.191.0.0/16", "209.85.152.0/22", "209.85.204.0/22"}...) + // L3/4 health checkers have client addresses within these known CIDRs. + l4LbSrcRngsFlag.ipn, err = utilnet.ParseIPNets([]string{"130.211.0.0/22", "35.191.0.0/16", "209.85.152.0/22", "209.85.204.0/22"}...) + if err != nil { + panic("Incorrect default GCE L3/4 source ranges") + } + // L7 health checkers have client addresses within these known CIDRs. + l7lbSrcRngsFlag.ipn, err = utilnet.ParseIPNets([]string{"130.211.0.0/22", "35.191.0.0/16"}...) if err != nil { panic("Incorrect default GCE L7 source ranges") } - flag.Var(&lbSrcRngsFlag, "cloud-provider-gce-lb-src-cidrs", "CIDRs opened in GCE firewall for LB traffic proxy & health checks") + flag.Var(&l4LbSrcRngsFlag, "cloud-provider-gce-lb-src-cidrs", "CIDRs opened in GCE firewall for L4 LB traffic proxy & health checks") + flag.Var(&l7lbSrcRngsFlag, "cloud-provider-gce-l7lb-src-cidrs", "CIDRs opened in GCE firewall for L7 LB traffic proxy & health checks") } // String is the method to format the flag's value, part of the flag.Value interface. @@ -82,10 +89,16 @@ func (c *cidrs) Set(value string) error { return nil } -// LoadBalancerSrcRanges contains the ranges of ips used by the GCE load balancers (l4 & L7) +// L4LoadBalancerSrcRanges contains the ranges of ips used by the L3/L4 GCE load balancers +// for proxying client requests and performing health checks. +func L4LoadBalancerSrcRanges() []string { + return l4LbSrcRngsFlag.ipn.StringSlice() +} + +// L7LoadBalancerSrcRanges contains the ranges of ips used by the GCE load balancers L7 // for proxying client requests and performing health checks. 
-func LoadBalancerSrcRanges() []string { - return lbSrcRngsFlag.ipn.StringSlice() +func L7LoadBalancerSrcRanges() []string { + return l7lbSrcRngsFlag.ipn.StringSlice() } // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go index f9ff6853ac1..f8a7074c022 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go @@ -876,7 +876,7 @@ func (g *Cloud) ensureHTTPHealthCheckFirewall(svc *v1.Service, serviceName, ipAd if !isNodesHealthCheck { desc = makeFirewallDescription(serviceName, ipAddress) } - sourceRanges := lbSrcRngsFlag.ipn + sourceRanges := l4LbSrcRngsFlag.ipn ports := []v1.ServicePort{{Protocol: "tcp", Port: hcPort}} fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go index 31a8beb488d..72d554756dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go @@ -22,11 +22,12 @@ import ( "context" "encoding/json" "fmt" - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "sort" "strconv" "strings" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" @@ -48,7 +49,7 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v } nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} - ports, protocol := getPortsAndProtocol(svc.Spec.Ports) + ports, _, protocol := getPortsAndProtocol(svc.Spec.Ports) if protocol != v1.ProtocolTCP && protocol != v1.ProtocolUDP { return nil, fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(protocol)) } @@ -58,6 +59,12 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v g.eventRecorder.Event(svc, v1.EventTypeWarning, "ILBOptionsIgnored", "Internal LoadBalancer options are not supported with Legacy Networks.") options = ILBOptions{} } + if !g.AlphaFeatureGate.Enabled(AlphaFeatureILBCustomSubnet) { + if options.SubnetName != "" { + g.eventRecorder.Event(svc, v1.EventTypeWarning, "ILBCustomSubnetOptionIgnored", "Internal LoadBalancer CustomSubnet options ignored as the feature gate is disabled.") + options.SubnetName = "" + } + } loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) sharedBackend := shareBackendService(svc) @@ -98,23 +105,32 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v return nil, err } + subnetworkURL := g.SubnetworkURL() + if g.AlphaFeatureGate.Enabled(AlphaFeatureILBCustomSubnet) { + // If this feature is enabled, changes to subnet annotation will be + // picked up and reflected in the forwarding rule. + // Removing the annotation will set the forwarding rule to use the default subnet. 
+ if options.SubnetName != "" { + subnetworkURL = gceSubnetworkURL("", g.networkProjectID, g.region, options.SubnetName) + } + } else { + // TODO(84885) remove this once ILBCustomSubnet goes beta. + if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { + // If the ILB already exists, continue using the subnet that it's already using. + // This is to support existing ILBs that were setup using the wrong subnet - https://github.com/kubernetes/kubernetes/pull/57861 + subnetworkURL = existingFwdRule.Subnetwork + } + } // Determine IP which will be used for this LB. If no forwarding rule has been established // or specified in the Service spec, then requestedIP = "". - requestedIP := determineRequestedIP(svc, existingFwdRule) - ipToUse := requestedIP + ipToUse := ilbIPToUse(svc, existingFwdRule, subnetworkURL) - // If the ILB already exists, continue using the subnet that it's already using. - // This is to support existing ILBs that were setup using the wrong subnet. - subnetworkURL := g.SubnetworkURL() - if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { - // external LBs have an empty Subnetwork field. - subnetworkURL = existingFwdRule.Subnetwork - } + klog.V(2).Infof("ensureInternalLoadBalancer(%v): Using subnet %s for LoadBalancer IP %s", loadBalancerName, options.SubnetName, ipToUse) var addrMgr *addressManager // If the network is not a legacy network, use the address manager if !g.IsLegacyNetwork() { - addrMgr = newAddressManager(g, nm.String(), g.Region(), subnetworkURL, loadBalancerName, requestedIP, cloud.SchemeInternal) + addrMgr = newAddressManager(g, nm.String(), g.Region(), subnetworkURL, loadBalancerName, ipToUse, cloud.SchemeInternal) ipToUse, err = addrMgr.HoldAddress() if err != nil { return nil, err @@ -216,7 +232,7 @@ func (g *Cloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v } // Generate the backend service name - _, protocol := getPortsAndProtocol(svc.Spec.Ports) + _, _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc), scheme, protocol, svc.Spec.SessionAffinity) @@ -226,7 +242,7 @@ func (g *Cloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error { loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) - _, protocol := getPortsAndProtocol(svc.Spec.Ports) + _, _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal sharedBackend := shareBackendService(svc) sharedHealthCheck := !servicehelpers.RequestsOnlyLocalTraffic(svc) @@ -248,14 +264,26 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, return err } - klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) - if err := ignoreNotFound(g.DeleteFirewall(loadBalancerName)); err != nil { - if isForbidden(err) && g.OnXPN() { - klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. 
Raising event.", loadBalancerName) - g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, g.NetworkProjectID())) - } else { + deleteFunc := func(fwName string) error { + if err := ignoreNotFound(g.DeleteFirewall(fwName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) + return nil + } return err } + return nil + } + fwName := MakeFirewallName(loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall %s for traffic", + loadBalancerName, fwName) + if err := deleteFunc(fwName); err != nil { + return err + } + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting legacy name firewall for traffic", loadBalancerName) + if err := deleteFunc(loadBalancerName); err != nil { + return err } hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck) @@ -317,7 +345,7 @@ func (g *Cloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName s return nil } -func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { +func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, portRanges []string, protocol v1.Protocol, nodes []*v1.Node, legacyFwName string) error { klog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) targetTags, err := g.GetNodeTags(nodeNames(nodes)) if err != nil { @@ -328,6 +356,29 @@ func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, s if err != nil && !isNotFound(err) { return err } + // TODO(84821) Remove legacyFwName logic after 3 releases, so there would have been atleast 2 master upgrades that would + // have triggered service sync and deletion of the legacy rules. + if legacyFwName != "" { + // Check for firewall named with the legacy naming scheme and delete if found. + legacyFirewall, err := g.GetFirewall(legacyFwName) + if err != nil && !isNotFound(err) { + return err + } + if legacyFirewall != nil && existingFirewall != nil { + // Delete the legacyFirewall rule if the new one was already created. If not, it will be deleted in the + // next sync or when the service is deleted. 
+ defer func() { + err = g.DeleteFirewall(legacyFwName) + if err != nil { + klog.Errorf("Failed to delete legacy firewall %s for service %s/%s, err %v", + legacyFwName, svc.Namespace, svc.Name, err) + } else { + klog.V(2).Infof("Successfully deleted legacy firewall %s for service %s/%s", + legacyFwName, svc.Namespace, svc.Name) + } + }() + } + } expectedFirewall := &compute.Firewall{ Name: fwName, @@ -338,7 +389,7 @@ func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, s Allowed: []*compute.FirewallAllowed{ { IPProtocol: strings.ToLower(string(protocol)), - Ports: ports, + Ports: portRanges, }, }, } @@ -371,20 +422,20 @@ func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, s func (g *Cloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error { // First firewall is for ingress traffic fwDesc := makeFirewallDescription(nm.String(), ipAddress) - ports, protocol := getPortsAndProtocol(svc.Spec.Ports) + _, portRanges, protocol := getPortsAndProtocol(svc.Spec.Ports) sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(svc) if err != nil { return err } - err = g.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes) + err = g.ensureInternalFirewall(svc, MakeFirewallName(loadBalancerName), fwDesc, sourceRanges.StringSlice(), portRanges, protocol, nodes, loadBalancerName) if err != nil { return err } // Second firewall is for health checking nodes / services fwHCName := makeHealthCheckFirewallName(loadBalancerName, clusterID, sharedHealthCheck) - hcSrcRanges := LoadBalancerSrcRanges() - return g.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes) + hcSrcRanges := L4LoadBalancerSrcRanges() + return g.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes, "") } func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { @@ -697,17 +748,62 @@ func backendSvcEqual(a, b *compute.BackendService) bool { backendsListEqual(a.Backends, b.Backends) } -func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1.Protocol) { +func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, portRanges []string, protocol v1.Protocol) { if len(svcPorts) == 0 { - return []string{}, v1.ProtocolUDP + return []string{}, []string{}, v1.ProtocolUDP } // GCP doesn't support multiple protocols for a single load balancer protocol = svcPorts[0].Protocol + portInts := []int{} for _, p := range svcPorts { ports = append(ports, strconv.Itoa(int(p.Port))) + portInts = append(portInts, int(p.Port)) } - return ports, protocol + + return ports, getPortRanges(portInts), protocol +} + +func getPortRanges(ports []int) (ranges []string) { + if len(ports) < 1 { + return ranges + } + sort.Ints(ports) + + start := ports[0] + prev := ports[0] + for ix, current := range ports { + switch { + case current == prev: + // Loop over duplicates, except if the end of list is reached. + if ix == len(ports)-1 { + if start == current { + ranges = append(ranges, fmt.Sprintf("%d", current)) + } else { + ranges = append(ranges, fmt.Sprintf("%d-%d", start, current)) + } + } + case current == prev+1: + // continue the streak, create the range if this is the last element in the list. 
+ if ix == len(ports)-1 { + ranges = append(ranges, fmt.Sprintf("%d-%d", start, current)) + } + default: + // current is not prev + 1, streak is broken. Construct the range and handle last element case. + if start == prev { + ranges = append(ranges, fmt.Sprintf("%d", prev)) + } else { + ranges = append(ranges, fmt.Sprintf("%d-%d", start, prev)) + } + if ix == len(ports)-1 { + ranges = append(ranges, fmt.Sprintf("%d", current)) + } + // reset start element + start = current + } + prev = current + } + return ranges } func (g *Cloud) getBackendServiceLink(name string) string { @@ -723,20 +819,27 @@ func getNameFromLink(link string) string { return fields[len(fields)-1] } -func determineRequestedIP(svc *v1.Service, fwdRule *compute.ForwardingRule) string { +// ilbIPToUse determines which IP address needs to be used in the ForwardingRule. If an IP has been +// specified by the user, that is used. If there is an existing ForwardingRule, the ip address from +// that is reused. In case a subnetwork change is requested, the existing ForwardingRule IP is ignored. +func ilbIPToUse(svc *v1.Service, fwdRule *compute.ForwardingRule, requestedSubnet string) string { if svc.Spec.LoadBalancerIP != "" { return svc.Spec.LoadBalancerIP } - - if fwdRule != nil { - return fwdRule.IPAddress + if fwdRule == nil { + return "" } - - return "" + if requestedSubnet != fwdRule.Subnetwork { + // reset ip address since subnet is being changed. + return "" + } + return fwdRule.IPAddress } func getILBOptions(svc *v1.Service) ILBOptions { - return ILBOptions{AllowGlobalAccess: GetLoadBalancerAnnotationAllowGlobalAccess(svc)} + return ILBOptions{AllowGlobalAccess: GetLoadBalancerAnnotationAllowGlobalAccess(svc), + SubnetName: GetLoadBalancerAnnotationSubnet(svc), + } } // forwardingRuleComposite is a composite type encapsulating both the GA and Beta ForwardingRules. 
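The getPortRanges helper introduced above collapses a service's ports into contiguous ranges so the internal firewall rule can carry entries like "8080-8082" instead of one entry per port. The following is a compact standalone sketch of the same coalescing idea, not the vendored function itself (which is unexported in the gce package); the sample port list is made up for illustration.

package main

import (
	"fmt"
	"sort"
	"strconv"
)

// coalescePortRanges mirrors the idea behind the vendored getPortRanges helper:
// sort the ports and collapse duplicates and consecutive runs into "start-end"
// strings suitable for a firewall rule's allowed-ports list.
func coalescePortRanges(ports []int) []string {
	if len(ports) == 0 {
		return nil
	}
	sort.Ints(ports)
	var ranges []string
	start, prev := ports[0], ports[0]
	flush := func() {
		if start == prev {
			ranges = append(ranges, strconv.Itoa(start))
		} else {
			ranges = append(ranges, fmt.Sprintf("%d-%d", start, prev))
		}
	}
	for _, p := range ports[1:] {
		if p == prev || p == prev+1 { // duplicate or contiguous: extend the current run
			prev = p
			continue
		}
		flush() // gap: emit the finished run and start a new one
		start, prev = p, p
	}
	flush()
	return ranges
}

func main() {
	fmt.Println(coalescePortRanges([]int{8082, 80, 8080, 443, 8081, 443}))
	// Output: [80 443 8080-8082]
}

For this sample input the grouping matches what the vendored helper produces, and handing such ranges to the firewall keeps the rule small even for services with many contiguous ports.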
@@ -765,7 +868,8 @@ func (f *forwardingRuleComposite) Equal(other *forwardingRuleComposite) bool { f.lbScheme == other.lbScheme && equalStringSets(f.ports, other.ports) && f.backendService == other.backendService && - f.allowGlobalAccess == other.allowGlobalAccess + f.allowGlobalAccess == other.allowGlobalAccess && + f.subnetwork == other.subnetwork } // toForwardingRuleComposite converts a compute beta or GA ForwardingRule into the composite type diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go index a871c54d5dd..046c599dd4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -110,7 +110,7 @@ func getProjectAndZone() (string, string, error) { } func (g *Cloud) raiseFirewallChangeNeededEvent(svc *v1.Service, cmd string) { - msg := fmt.Sprintf("Firewall change required by network admin: `%v`", cmd) + msg := fmt.Sprintf("Firewall change required by security admin: `%v`", cmd) if g.eventRecorder != nil && svc != nil { g.eventRecorder.Event(svc, v1.EventTypeNormal, "LoadBalancerManualChange", msg) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/BUILD index ce61e815cc0..7f989c1fde3 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/BUILD @@ -39,7 +39,6 @@ go_library( "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions:go_default_library", @@ -65,10 +64,10 @@ go_library( "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports:go_default_library", "//vendor/github.com/gophercloud/gophercloud/pagination:go_default_library", "//vendor/github.com/mitchellh/mapstructure:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/mount:go_default_library", ], ) @@ -103,9 +102,6 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/metadata.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/metadata.go index 7562a684562..c1b65de0c4d 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/metadata.go @@ -30,8 +30,8 @@ import ( "strings" "k8s.io/klog" - osmount "k8s.io/legacy-cloud-providers/openstack/util/mount" "k8s.io/utils/exec" + "k8s.io/utils/mount" ) const ( @@ -125,7 +125,7 @@ func 
getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { klog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir) - mounter := osmount.New("" /* default mount path */) + mounter := mount.New("" /* default mount path */) err = mounter.Mount(dev, mntdir, "iso9660", []string{"ro"}) if err != nil { err = mounter.Mount(dev, mntdir, "vfat", []string{"ro"}) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go index d7741097a4e..c3db860b5e4 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go @@ -221,7 +221,6 @@ func (cfg Config) toAuth3Options() tokens3.AuthOptions { func configFromEnv() (cfg Config, ok bool) { cfg.Global.AuthURL = os.Getenv("OS_AUTH_URL") cfg.Global.Username = os.Getenv("OS_USERNAME") - cfg.Global.Password = os.Getenv("OS_PASSWORD") cfg.Global.Region = os.Getenv("OS_REGION_NAME") cfg.Global.UserID = os.Getenv("OS_USER_ID") cfg.Global.TrustID = os.Getenv("OS_TRUST_ID") diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go index 25c86a09677..0a4c7b5a363 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go @@ -999,9 +999,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if lbaas.opts.ManageSecurityGroups { err := lbaas.ensureSecurityGroup(clusterName, apiService, nodes, loadbalancer) if err != nil { - // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(ctx, clusterName, apiService) - return status, err + return status, fmt.Errorf("Error reconciling security groups for LB service %v/%v: %v", apiService.Namespace, apiService.Name, err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go index 6bc266d7bca..95e38854912 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go @@ -35,6 +35,7 @@ import ( cloudvolume "k8s.io/cloud-provider/volume" volerr "k8s.io/cloud-provider/volume/errors" volumehelpers "k8s.io/cloud-provider/volume/helpers" + "k8s.io/component-base/metrics" "github.com/gophercloud/gophercloud" volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" @@ -42,8 +43,6 @@ import ( volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" volumes_v3 "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" ) @@ -745,8 +744,8 @@ func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo // recordOpenstackOperationMetric records openstack operation metrics func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) { if err != nil { - openstackAPIRequestErrors.With(prometheus.Labels{"request": operation}).Inc() + 
openstackAPIRequestErrors.With(metrics.Labels{"request": operation}).Inc() } else { - openstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken) + openstackOperationsLatency.With(metrics.Labels{"request": operation}).Observe(timeTaken) } } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/BUILD deleted file mode 100644 index 79921a0102e..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/BUILD +++ /dev/null @@ -1,43 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "mount.go", - "mount_linux.go", - "mount_unsupported.go", - "mount_windows.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount", - importpath = "k8s.io/legacy-cloud-providers/openstack/util/mount", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/io:go_default_library", - "//vendor/k8s.io/utils/path:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/utils/path:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount.go deleted file mode 100644 index 06a512ef412..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount.go +++ /dev/null @@ -1,380 +0,0 @@ -// +build !providerless - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mount provides a linux-centric library for mounting filesystems/ -// Vendored for the OpenStack provider and will be removed in 1.17 -package mount - -import ( - "fmt" - "os" - "path/filepath" - "strings" -) - -// FileType is one of the types of supported Linux file types. -type FileType string - -// A complete list of the support file types in the mount package. 
-const ( - // Default mount command if mounter path is not specified - defaultMountCommand = "mount" - MountsInGlobalPDPath = "mounts" - FileTypeDirectory FileType = "Directory" - FileTypeFile FileType = "File" - FileTypeSocket FileType = "Socket" - FileTypeCharDev FileType = "CharDevice" - FileTypeBlockDev FileType = "BlockDevice" -) - -// Interface is the general interface to filesystem mounts. -type Interface interface { - // Mount mounts source to target as fstype with given options. - Mount(source string, target string, fstype string, options []string) error - // Unmount unmounts given target. - Unmount(target string) error - // List returns a list of all mounted filesystems. This can be large. - // On some platforms, reading mounts is not guaranteed consistent (i.e. - // it could change between chunked reads). This is guaranteed to be - // consistent. - List() ([]MntPoint, error) - // IsMountPointMatch determines if the mountpoint matches the dir - IsMountPointMatch(mp MntPoint, dir string) bool - // IsNotMountPoint determines if a directory is a mountpoint. - // It should return ErrNotExist when the directory does not exist. - // IsNotMountPoint is more expensive than IsLikelyNotMountPoint. - // IsNotMountPoint detects bind mounts in linux. - // IsNotMountPoint enumerates all the mountpoints using List() and - // the list of mountpoints may be large, then it uses - // IsMountPointMatch to evaluate whether the directory is a mountpoint - IsNotMountPoint(file string) (bool, error) - // IsLikelyNotMountPoint uses heuristics to determine if a directory - // is a mountpoint. - // It should return ErrNotExist when the directory does not exist. - // IsLikelyNotMountPoint does NOT properly detect all mountpoint types - // most notably linux bind mounts. - IsLikelyNotMountPoint(file string) (bool, error) - // DeviceOpened determines if the device is in use elsewhere - // on the system, i.e. still mounted. - DeviceOpened(pathname string) (bool, error) - // PathIsDevice determines if a path is a device. - PathIsDevice(pathname string) (bool, error) - // GetDeviceNameFromMount finds the device name by checking the mount path - // to get the global mount path which matches its plugin directory - GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) - // MakeRShared checks that given path is on a mount with 'rshared' mount - // propagation. If not, it bind-mounts the path as rshared. - MakeRShared(path string) error - // GetFileType checks for file/directory/socket/block/character devices. - // Will operate in the host mount namespace if kubelet is running in a container - GetFileType(pathname string) (FileType, error) - // MakeFile creates an empty file. - // Will operate in the host mount namespace if kubelet is running in a container - MakeFile(pathname string) error - // MakeDir creates a new directory. - // Will operate in the host mount namespace if kubelet is running in a container - MakeDir(pathname string) error - // SafeMakeDir creates subdir within given base. It makes sure that the - // created directory does not escape given base directory mis-using - // symlinks. Note that the function makes sure that it creates the directory - // somewhere under the base, nothing else. E.g. if the directory already - // exists, it may exist outside of the base due to symlinks. - // This method should be used if the directory to create is inside volume - // that's under user control. User must not be able to use symlinks to - // escape the volume to create directories somewhere else. 
- SafeMakeDir(subdir string, base string, perm os.FileMode) error - // Will operate in the host mount namespace if kubelet is running in a container. - // Error is returned on any other error than "file not found". - ExistsPath(pathname string) (bool, error) - // EvalHostSymlinks returns the path name after evaluating symlinks. - // Will operate in the host mount namespace if kubelet is running in a container. - EvalHostSymlinks(pathname string) (string, error) - // CleanSubPaths removes any bind-mounts created by PrepareSafeSubpath in given - // pod volume directory. - CleanSubPaths(podDir string, volumeName string) error - // PrepareSafeSubpath does everything that's necessary to prepare a subPath - // that's 1) inside given volumePath and 2) immutable after this call. - // - // newHostPath - location of prepared subPath. It should be used instead of - // hostName when running the container. - // cleanupAction - action to run when the container is running or it failed to start. - // - // CleanupAction must be called immediately after the container with given - // subpath starts. On the other hand, Interface.CleanSubPaths must be called - // when the pod finishes. - PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) - // GetMountRefs finds all mount references to the path, returns a - // list of paths. Path could be a mountpoint path, device or a normal - // directory (for bind mount). - GetMountRefs(pathname string) ([]string, error) - // GetFSGroup returns FSGroup of the path. - GetFSGroup(pathname string) (int64, error) - // GetSELinuxSupport returns true if given path is on a mount that supports - // SELinux. - GetSELinuxSupport(pathname string) (bool, error) - // GetMode returns permissions of the path. - GetMode(pathname string) (os.FileMode, error) -} - -// Subpath is the string path of the volume mount. -type Subpath struct { - // index of the VolumeMount for this container - VolumeMountIndex int - // Full path to the subpath directory on the host - Path string - // name of the volume that is a valid directory name. - VolumeName string - // Full path to the volume path - VolumePath string - // Path to the pod's directory, including pod UID - PodDir string - // Name of the container - ContainerName string -} - -// Exec executes command where mount utilities are. This can be either the host, -// container where kubelet runs or even a remote pod with mount utilities. -// Usual pkg/util/exec interface is not used because kubelet.RunInContainer does -// not provide stdin/stdout/stderr streams. -type Exec interface { - // Run executes a command and returns its stdout + stderr combined in one - // stream. - Run(cmd string, args ...string) ([]byte, error) -} - -// Compile-time check to ensure all Mounter implementations satisfy -// the mount interface -var _ Interface = &Mounter{} - -// MntPoint represents a single line in /proc/mounts or /etc/fstab. -type MntPoint struct { - Device string - Path string - Type string - Opts []string - Freq int - Pass int -} - -// SafeFormatAndMount probes a device to see if it is formatted. -// Namely it checks to see if a file system is present. If so it -// mounts it otherwise the device is formatted first then mounted. -type SafeFormatAndMount struct { - Interface - Exec -} - -// FormatAndMount formats the given disk, if needed, and mounts it. -// That is if the disk is not formatted and it is not being mounted as -// read-only it will format it first then mount it. 
Otherwise, if the -// disk is already formatted or it is being mounted as read-only, it -// will be mounted without formatting. -func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, fstype string, options []string) error { - return mounter.formatAndMount(source, target, fstype, options) -} - -// GetDeviceNameFromMount when given a mnt point, find the device from /proc/mounts -// returns the device name, reference count, and error code. -func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) { - mps, err := mounter.List() - if err != nil { - return "", 0, err - } - - // Find the device name. - // FIXME if multiple devices mounted on the same mount path, only the first one is returned - device := "" - // If mountPath is symlink, need get its target path. - slTarget, err := filepath.EvalSymlinks(mountPath) - if err != nil { - slTarget = mountPath - } - for i := range mps { - if mps[i].Path == slTarget { - device = mps[i].Device - break - } - } - - // Find all references to the device. - refCount := 0 - for i := range mps { - if mps[i].Device == device { - refCount++ - } - } - return device, refCount, nil -} - -// IsNotMountPoint determines if a directory is a mountpoint. -// It should return ErrNotExist when the directory does not exist. -// This method uses the List() of all mountpoints -// It is more extensive than IsLikelyNotMountPoint -// and it detects bind mounts in linux -func IsNotMountPoint(mounter Interface, file string) (bool, error) { - // IsLikelyNotMountPoint provides a quick check - // to determine whether file IS A mountpoint - notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file) - if notMntErr != nil && os.IsPermission(notMntErr) { - // We were not allowed to do the simple stat() check, e.g. on NFS with - // root_squash. Fall back to /proc/mounts check below. - notMnt = true - notMntErr = nil - } - if notMntErr != nil { - return notMnt, notMntErr - } - // identified as mountpoint, so return this fact - if notMnt == false { - return notMnt, nil - } - - // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts - resolvedFile, err := mounter.EvalHostSymlinks(file) - if err != nil { - return true, err - } - - // check all mountpoints since IsLikelyNotMountPoint - // is not reliable for some mountpoint types - mountPoints, mountPointsErr := mounter.List() - if mountPointsErr != nil { - return notMnt, mountPointsErr - } - for _, mp := range mountPoints { - if mounter.IsMountPointMatch(mp, resolvedFile) { - notMnt = false - break - } - } - return notMnt, nil -} - -// isBind detects whether a bind mount is being requested and makes the remount options to -// use in case of bind mount, due to the fact that bind mount doesn't respect mount options. -// The list equals: -// options - 'bind' + 'remount' (no duplicate) -func isBind(options []string) (bool, []string, []string) { - // Because we have an FD opened on the subpath bind mount, the "bind" option - // needs to be included, otherwise the mount target will error as busy if you - // remount as readonly. - // - // As a consequence, all read only bind mounts will no longer change the underlying - // volume mount to be read only. - bindRemountOpts := []string{"bind", "remount"} - bind := false - bindOpts := []string{"bind"} - - // _netdev is a userspace mount option and does not automatically get added when - // bind mount is created and hence we must carry it over. 
- if checkForNetDev(options) { - bindOpts = append(bindOpts, "_netdev") - } - - for _, option := range options { - switch option { - case "bind": - bind = true - case "remount": - default: - bindRemountOpts = append(bindRemountOpts, option) - } - } - - return bind, bindOpts, bindRemountOpts -} - -func checkForNetDev(options []string) bool { - for _, option := range options { - if option == "_netdev" { - return true - } - } - return false -} - -// HasMountRefs checks the mountPath contains any of the mountRefs. -// This is a workaround for the unmount device issue caused by gci mounter. -// In GCI cluster, if gci mounter is used for mounting, the container started by mounter -// script will cause additional mounts created in the container. Since these mounts are -// irrelevant to the original mounts, they should be not considered when checking the -// mount references. Current solution is to filter out those mount paths that contain -// the string of original mount path. -// Plan to work on better approach to solve this issue. -func HasMountRefs(mountPath string, mountRefs []string) bool { - count := 0 - for _, ref := range mountRefs { - if !strings.Contains(ref, mountPath) { - count = count + 1 - } - } - return count > 0 -} - -// PathWithinBase checks if give path is within given base directory. -func PathWithinBase(fullPath, basePath string) bool { - rel, err := filepath.Rel(basePath, fullPath) - if err != nil { - return false - } - if startsWithBackstep(rel) { - // Needed to escape the base path - return false - } - return true -} - -// startsWithBackstep checks if the given path starts with a backstep segment -func startsWithBackstep(rel string) bool { - // normalize to / and check for ../ - return rel == ".." || strings.HasPrefix(filepath.ToSlash(rel), "../") -} - -// getFileType checks for file/directory/socket and block/character devices -func getFileType(pathname string) (FileType, error) { - var pathType FileType - info, err := os.Stat(pathname) - if os.IsNotExist(err) { - return pathType, fmt.Errorf("path %q does not exist", pathname) - } - // err in call to os.Stat - if err != nil { - return pathType, err - } - - // checks whether the mode is the target mode - isSpecificMode := func(mode, targetMode os.FileMode) bool { - return mode&targetMode == targetMode - } - - mode := info.Mode() - if mode.IsDir() { - return FileTypeDirectory, nil - } else if mode.IsRegular() { - return FileTypeFile, nil - } else if isSpecificMode(mode, os.ModeSocket) { - return FileTypeSocket, nil - } else if isSpecificMode(mode, os.ModeDevice) { - if isSpecificMode(mode, os.ModeCharDevice) { - return FileTypeCharDev, nil - } - return FileTypeBlockDev, nil - } - - return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device") -} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go deleted file mode 100644 index 5dda471c663..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go +++ /dev/null @@ -1,1345 +0,0 @@ -// +build linux,!providerless - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mount - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - - "golang.org/x/sys/unix" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog" - utilexec "k8s.io/utils/exec" - utilio "k8s.io/utils/io" - utilpath "k8s.io/utils/path" -) - -const ( - // How many times to retry for a consistent read of /proc/mounts. - maxListTries = 3 - // Number of fields per line in /proc/mounts as per the fstab man page. - expectedNumFieldsPerLine = 6 - // At least number of fields per line in /proc//mountinfo. - expectedAtLeastNumFieldsPerMountInfo = 10 - // Location of the mount file to use - procMountsPath = "/proc/mounts" - // Location of the mountinfo file - procMountInfoPath = "/proc/self/mountinfo" - // 'fsck' found errors and corrected them - fsckErrorsCorrected = 1 - // 'fsck' found errors but exited without correcting them - fsckErrorsUncorrected = 4 - - // place for subpath mounts - containerSubPathDirectoryName = "volume-subpaths" - // syscall.Openat flags used to traverse directories not following symlinks - nofollowFlags = unix.O_RDONLY | unix.O_NOFOLLOW - // flags for getting file descriptor without following the symlink - openFDFlags = unix.O_NOFOLLOW | unix.O_PATH -) - -// Mounter provides the default implementation of mount.Interface -// for the linux platform. This implementation assumes that the -// kubelet is running in the host's root mount namespace. -type Mounter struct { - mounterPath string - withSystemd bool -} - -// New returns a mount.Interface for the current system. -// It provides options to override the default mounter behavior. -// mounterPath allows using an alternative to `/bin/mount` for mounting. -func New(mounterPath string) Interface { - return &Mounter{ - mounterPath: mounterPath, - withSystemd: detectSystemd(), - } -} - -// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must -// be an empty string in case it's not required, e.g. for remount, or for auto filesystem -// type, where kernel handles fstype for you. The mount 'options' is a list of options, -// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is -// required, call Mount with an empty string list or nil. -func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { - // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. - // All Linux distros are expected to be shipped with a mount utility that a support bind mounts. 
- mounterPath := "" - bind, bindOpts, bindRemountOpts := isBind(options) - if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts) - if err != nil { - return err - } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts) - } - // The list of filesystems that require containerized mounter on GCI image cluster - fsTypesNeedMounter := sets.NewString("nfs", "glusterfs", "ceph", "cifs") - if fsTypesNeedMounter.Has(fstype) { - mounterPath = mounter.mounterPath - } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options) -} - -// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used. -func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error { - mountArgs := makeMountArgs(source, target, fstype, options) - if len(mounterPath) > 0 { - mountArgs = append([]string{mountCmd}, mountArgs...) - mountCmd = mounterPath - } - - if mounter.withSystemd { - // Try to run mount via systemd-run --scope. This will escape the - // service where kubelet runs and any fuse daemons will be started in a - // specific scope. kubelet service than can be restarted without killing - // these fuse daemons. - // - // Complete command line (when mounterPath is not used): - // systemd-run --description=... --scope -- mount -t - // - // Expected flow: - // * systemd-run creates a transient scope (=~ cgroup) and executes its - // argument (/bin/mount) there. - // * mount does its job, forks a fuse daemon if necessary and finishes. - // (systemd-run --scope finishes at this point, returning mount's exit - // code and stdout/stderr - thats one of --scope benefits). - // * systemd keeps the fuse daemon running in the scope (i.e. in its own - // cgroup) until the fuse daemon dies (another --scope benefit). - // Kubelet service can be restarted and the fuse daemon survives. - // * When the fuse daemon dies (e.g. during unmount) systemd removes the - // scope automatically. - // - // systemd-mount is not used because it's too new for older distros - // (CentOS 7, Debian Jessie). - mountCmd, mountArgs = addSystemdScope("systemd-run", target, mountCmd, mountArgs) - } //else { - // No systemd-run on the host (or we failed to check it), assume kubelet - // does not run as a systemd service. - // No code here, mountCmd and mountArgs are already populated. - //} - - klog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) - command := exec.Command(mountCmd, mountArgs...) - output, err := command.CombinedOutput() - if err != nil { - args := strings.Join(mountArgs, " ") - klog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) - return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s", - err, mountCmd, args, string(output)) - } - return err -} - -// detectSystemd returns true if OS runs with systemd as init. When not sure -// (permission errors, ...), it returns false. -// There may be different ways how to detect systemd, this one makes sure that -// systemd-runs (needed by Mount()) works. 
-func detectSystemd() bool { - if _, err := exec.LookPath("systemd-run"); err != nil { - klog.V(2).Info("Detected OS without systemd") - return false - } - // Try to run systemd-run --scope /bin/true, that should be enough - // to make sure that systemd is really running and not just installed, - // which happens when running in a container with a systemd-based image - // but with different pid 1. - cmd := exec.Command("systemd-run", "--description=Kubernetes systemd probe", "--scope", "true") - output, err := cmd.CombinedOutput() - if err != nil { - klog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS") - klog.V(4).Infof("systemd-run failed with: %v", err) - klog.V(4).Infof("systemd-run output: %s", string(output)) - return false - } - klog.V(2).Info("Detected OS with systemd") - return true -} - -// makeMountArgs makes the arguments to the mount(8) command. -func makeMountArgs(source, target, fstype string, options []string) []string { - // Build mount command as follows: - // mount [-t $fstype] [-o $options] [$source] $target - mountArgs := []string{} - if len(fstype) > 0 { - mountArgs = append(mountArgs, "-t", fstype) - } - if len(options) > 0 { - mountArgs = append(mountArgs, "-o", strings.Join(options, ",")) - } - if len(source) > 0 { - mountArgs = append(mountArgs, source) - } - mountArgs = append(mountArgs, target) - - return mountArgs -} - -// addSystemdScope adds "system-run --scope" to given command line -func addSystemdScope(systemdRunPath, mountName, command string, args []string) (string, []string) { - descriptionArg := fmt.Sprintf("--description=Kubernetes transient mount for %s", mountName) - systemdRunArgs := []string{descriptionArg, "--scope", "--", command} - return systemdRunPath, append(systemdRunArgs, args...) -} - -// Unmount unmounts the target. -func (mounter *Mounter) Unmount(target string) error { - klog.V(4).Infof("Unmounting %s", target) - command := exec.Command("umount", target) - output, err := command.CombinedOutput() - if err != nil { - return fmt.Errorf("unmount failed: %v\nUnmounting arguments: %s\nOutput: %s", err, target, string(output)) - } - return nil -} - -// List returns a list of all mounted filesystems. -func (*Mounter) List() ([]MntPoint, error) { - return listProcMounts(procMountsPath) -} - -// IsMountPointMatch determines if a directory is a mountpoint. -func (mounter *Mounter) IsMountPointMatch(mp MntPoint, dir string) bool { - deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) - return ((mp.Path == dir) || (mp.Path == deletedDir)) -} - -// IsNotMountPoint determines if a directory is not a mountpoint. -func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { - return IsNotMountPoint(mounter, dir) -} - -// IsLikelyNotMountPoint determines if a directory is not a mountpoint. -// It is fast but not necessarily ALWAYS correct. If the path is in fact -// a bind mount from one part of a mount to another it will not be detected. -// mkdir /tmp/a /tmp/b; mount --bin /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b") -// will return true. When in fact /tmp/b is a mount point. If this situation -// if of interest to you, don't use this function... -func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { - stat, err := os.Stat(file) - if err != nil { - return true, err - } - rootStat, err := os.Lstat(filepath.Dir(strings.TrimSuffix(file, "/"))) - if err != nil { - return true, err - } - // If the directory has a different device as parent, then it is a mountpoint. 
- if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev { - return false, nil - } - - return true, nil -} - -// DeviceOpened checks if block device in use by calling Open with O_EXCL flag. -// If pathname is not a device, log and return false with nil error. -// If open returns errno EBUSY, return true with nil error. -// If open returns nil, return false with nil error. -// Otherwise, return false with error -func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { - return exclusiveOpenFailsOnDevice(pathname) -} - -// PathIsDevice uses FileInfo returned from os.Stat to check if path refers -// to a device. -func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) { - pathType, err := mounter.GetFileType(pathname) - isDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev - return isDevice, err -} - -func exclusiveOpenFailsOnDevice(pathname string) (bool, error) { - var isDevice bool - finfo, err := os.Stat(pathname) - if os.IsNotExist(err) { - isDevice = false - } - // err in call to os.Stat - if err != nil { - return false, fmt.Errorf( - "PathIsDevice failed for path %q: %v", - pathname, - err) - } - // path refers to a device - if finfo.Mode()&os.ModeDevice != 0 { - isDevice = true - } - - if !isDevice { - klog.Errorf("Path %q is not referring to a device.", pathname) - return false, nil - } - fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL, 0) - // If the device is in use, open will return an invalid fd. - // When this happens, it is expected that Close will fail and throw an error. - defer unix.Close(fd) - if errno == nil { - // device not in use - return false, nil - } else if errno == unix.EBUSY { - // device is in use - return true, nil - } - // error during call to Open - return false, errno -} - -// GetDeviceNameFromMount when given a mount point, finds the device name from its global -// mount point -func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { - return getDeviceNameFromMount(mounter, mountPath, pluginDir) -} - -// getDeviceNameFromMount find the device name from /proc/mounts in which -// the mount path reference should match the given plugin directory. 
In case no mount path reference -// matches, returns the volume name taken from its given mountPath -func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { - refs, err := mounter.GetMountRefs(mountPath) - if err != nil { - klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) - return "", err - } - if len(refs) == 0 { - klog.V(4).Infof("Directory %s is not mounted", mountPath) - return "", fmt.Errorf("directory %s is not mounted", mountPath) - } - basemountPath := path.Join(pluginDir, MountsInGlobalPDPath) - for _, ref := range refs { - if strings.HasPrefix(ref, basemountPath) { - volumeID, err := filepath.Rel(basemountPath, ref) - if err != nil { - klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) - return "", err - } - return volumeID, nil - } - } - - return path.Base(mountPath), nil -} - -func listProcMounts(mountFilePath string) ([]MntPoint, error) { - content, err := utilio.ConsistentRead(mountFilePath, maxListTries) - if err != nil { - return nil, err - } - return parseProcMounts(content) -} - -func parseProcMounts(content []byte) ([]MntPoint, error) { - out := []MntPoint{} - lines := strings.Split(string(content), "\n") - for _, line := range lines { - if line == "" { - // the last split() item is empty string following the last \n - continue - } - fields := strings.Fields(line) - if len(fields) != expectedNumFieldsPerLine { - return nil, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line) - } - - mp := MntPoint{ - Device: fields[0], - Path: fields[1], - Type: fields[2], - Opts: strings.Split(fields[3], ","), - } - - freq, err := strconv.Atoi(fields[4]) - if err != nil { - return nil, err - } - mp.Freq = freq - - pass, err := strconv.Atoi(fields[5]) - if err != nil { - return nil, err - } - mp.Pass = pass - - out = append(out, mp) - } - return out, nil -} - -// MakeRShared makes the bind mount rshared if needed. -func (mounter *Mounter) MakeRShared(path string) error { - return doMakeRShared(path, procMountInfoPath) -} - -// GetFileType returns the string FileType of the path. -func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { - return getFileType(pathname) -} - -// MakeDir creates a directory at the specified path. -func (mounter *Mounter) MakeDir(pathname string) error { - err := os.MkdirAll(pathname, os.FileMode(0755)) - if err != nil { - if !os.IsExist(err) { - return err - } - } - return nil -} - -// MakeFile creates a file with permissions 0644 at the specified path. -func (mounter *Mounter) MakeFile(pathname string) error { - f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644)) - if err != nil { - if !os.IsExist(err) { - return err - } - defer f.Close() - } - return nil -} - -// ExistsPath returns true if the specified path exists. -func (mounter *Mounter) ExistsPath(pathname string) (bool, error) { - return utilpath.Exists(utilpath.CheckFollowSymlink, pathname) -} - -// EvalHostSymlinks returns the actual path a symlink points to. 
-func (mounter *Mounter) EvalHostSymlinks(pathname string) (string, error) { - return filepath.EvalSymlinks(pathname) -} - -// formatAndMount uses unix utils to format and mount the given disk -func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - readOnly := false - for _, option := range options { - if option == "ro" { - readOnly = true - break - } - } - - options = append(options, "defaults") - - if !readOnly { - // Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw. - klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) - args := []string{"-a", source} - out, err := mounter.Exec.Run("fsck", args...) - if err != nil { - ee, isExitError := err.(utilexec.ExitError) - switch { - case err == utilexec.ErrExecutableNotFound: - klog.Warning("'fsck' not found on system; continuing mount without running 'fsck'") - case isExitError && ee.ExitStatus() == fsckErrorsCorrected: - klog.Infof("Device %s has errors which were corrected by fsck", source) - case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: - return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s", source, string(out)) - case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: - klog.Infof("`fsck` error %s", string(out)) - } - } - } - - // Try to mount the disk - klog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) - mountErr := mounter.Interface.Mount(source, target, fstype, options) - if mountErr != nil { - // Mount failed. This indicates either that the disk is unformatted or - // it contains an unexpected filesystem. - existingFormat, err := mounter.GetDiskFormat(source) - if err != nil { - return err - } - if existingFormat == "" { - if readOnly { - // Don't attempt to format if mounting as readonly, return an error to reflect this. - return errors.New("failed to mount unformatted volume as read only") - } - - // Disk is unformatted so format it. - args := []string{source} - // Use 'ext4' as the default - if len(fstype) == 0 { - fstype = "ext4" - } - - if fstype == "ext4" || fstype == "ext3" { - args = []string{ - "-F", // Force flag - "-m0", // Zero blocks reserved for super-user - source, - } - } - klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) - _, err := mounter.Exec.Run("mkfs."+fstype, args...) - if err == nil { - // the disk has been formatted successfully try to mount it again. - klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) - return mounter.Interface.Mount(source, target, fstype, options) - } - klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) - return err - } - // Disk is already formatted and failed to mount - if len(fstype) == 0 || fstype == existingFormat { - // This is mount error - return mountErr - } - // Block device is formatted with unexpected filesystem, let the user know - return fmt.Errorf("failed to mount the volume as %q, it already contains %s. 
Mount error: %v", fstype, existingFormat, mountErr) - } - return mountErr -} - -// GetDiskFormat uses 'blkid' to see if the given disk is unformatted -func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { - args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk} - klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) - dataOut, err := mounter.Exec.Run("blkid", args...) - output := string(dataOut) - klog.V(4).Infof("Output: %q, err: %v", output, err) - - if err != nil { - if exit, ok := err.(utilexec.ExitError); ok { - if exit.ExitStatus() == 2 { - // Disk device is unformatted. - // For `blkid`, if the specified token (TYPE/PTTYPE, etc) was - // not found, or no (specified) devices could be identified, an - // exit code of 2 is returned. - return "", nil - } - } - klog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) - return "", err - } - - var fstype, pttype string - - lines := strings.Split(output, "\n") - for _, l := range lines { - if len(l) <= 0 { - // Ignore empty line. - continue - } - cs := strings.Split(l, "=") - if len(cs) != 2 { - return "", fmt.Errorf("blkid returns invalid output: %s", output) - } - // TYPE is filesystem type, and PTTYPE is partition table type, according - // to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/. - if cs[0] == "TYPE" { - fstype = cs[1] - } else if cs[0] == "PTTYPE" { - pttype = cs[1] - } - } - - if len(pttype) > 0 { - klog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype) - // Returns a special non-empty string as filesystem type, then kubelet - // will not format it. - return "unknown data, probably partitions", nil - } - - return fstype, nil -} - -// isShared returns true, if given path is on a mount point that has shared -// mount propagation. -func isShared(mount string, mountInfoPath string) (bool, error) { - info, err := findMountInfo(mount, mountInfoPath) - if err != nil { - return false, err - } - - // parse optional parameters - for _, opt := range info.optionalFields { - if strings.HasPrefix(opt, "shared:") { - return true, nil - } - } - return false, nil -} - -// This represents a single line in /proc//mountinfo. -type mountInfo struct { - // Unique ID for the mount (maybe reused after umount). - id int - // The ID of the parent mount (or of self for the root of this mount namespace's mount tree). - parentID int - // The value of `st_dev` for files on this filesystem. - majorMinor string - // The pathname of the directory in the filesystem which forms the root of this mount. - root string - // Mount source, filesystem-specific information. e.g. device, tmpfs name. - source string - // Mount point, the pathname of the mount point. - mountPoint string - // Optional fields, zero or more fields of the form "tag[:value]". - optionalFields []string - // The filesystem type in the form "type[.subtype]". - fsType string - // Per-mount options. - mountOptions []string - // Per-superblock options. - superOptions []string -} - -// parseMountInfo parses /proc/xxx/mountinfo. 
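A small standalone sketch of the `blkid -o export` output handling in GetDiskFormat above: the output is one KEY=VALUE pair per line, where TYPE names the filesystem and PTTYPE the partition table, if any. The helper and sample output below are illustrative.

package main

import (
    "fmt"
    "strings"
)

// parseBlkidExport extracts TYPE (filesystem) and PTTYPE (partition table)
// from blkid's export-style output, which is one KEY=VALUE pair per line.
func parseBlkidExport(output string) (fstype, pttype string, err error) {
    for _, line := range strings.Split(output, "\n") {
        if line == "" {
            continue
        }
        kv := strings.SplitN(line, "=", 2)
        if len(kv) != 2 {
            return "", "", fmt.Errorf("unexpected blkid line: %q", line)
        }
        switch kv[0] {
        case "TYPE":
            fstype = kv[1]
        case "PTTYPE":
            pttype = kv[1]
        }
    }
    return fstype, pttype, nil
}

func main() {
    // Sample output for an ext4-formatted device (illustrative only).
    out := "DEVNAME=/dev/sdb1\nTYPE=ext4\n"
    fstype, pttype, err := parseBlkidExport(out)
    fmt.Println(fstype, pttype, err)
}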
-func parseMountInfo(filename string) ([]mountInfo, error) { - content, err := utilio.ConsistentRead(filename, maxListTries) - if err != nil { - return []mountInfo{}, err - } - contentStr := string(content) - infos := []mountInfo{} - - for _, line := range strings.Split(contentStr, "\n") { - if line == "" { - // the last split() item is empty string following the last \n - continue - } - // See `man proc` for authoritative description of format of the file. - fields := strings.Fields(line) - if len(fields) < expectedAtLeastNumFieldsPerMountInfo { - return nil, fmt.Errorf("wrong number of fields in (expected at least %d, got %d): %s", expectedAtLeastNumFieldsPerMountInfo, len(fields), line) - } - id, err := strconv.Atoi(fields[0]) - if err != nil { - return nil, err - } - parentID, err := strconv.Atoi(fields[1]) - if err != nil { - return nil, err - } - info := mountInfo{ - id: id, - parentID: parentID, - majorMinor: fields[2], - root: fields[3], - mountPoint: fields[4], - mountOptions: strings.Split(fields[5], ","), - } - // All fields until "-" are "optional fields". - i := 6 - for ; i < len(fields) && fields[i] != "-"; i++ { - info.optionalFields = append(info.optionalFields, fields[i]) - } - // Parse the rest 3 fields. - i++ - if len(fields)-i < 3 { - return nil, fmt.Errorf("expect 3 fields in %s, got %d", line, len(fields)-i) - } - info.fsType = fields[i] - info.source = fields[i+1] - info.superOptions = strings.Split(fields[i+2], ",") - infos = append(infos, info) - } - return infos, nil -} - -func findMountInfo(path, mountInfoPath string) (mountInfo, error) { - infos, err := parseMountInfo(mountInfoPath) - if err != nil { - return mountInfo{}, err - } - - // process /proc/xxx/mountinfo in backward order and find the first mount - // point that is prefix of 'path' - that's the mount where path resides - var info *mountInfo - for i := len(infos) - 1; i >= 0; i-- { - if PathWithinBase(path, infos[i].mountPoint) { - info = &infos[i] - break - } - } - if info == nil { - return mountInfo{}, fmt.Errorf("cannot find mount point for %q", path) - } - return *info, nil -} - -// doMakeRShared is common implementation of MakeRShared on Linux. It checks if -// path is shared and bind-mounts it as rshared if needed. mountCmd and -// mountArgs are expected to contain mount-like command, doMakeRShared will add -// '--bind ' and '--make-rshared ' to mountArgs. -func doMakeRShared(path string, mountInfoFilename string) error { - shared, err := isShared(path, mountInfoFilename) - if err != nil { - return err - } - if shared { - klog.V(4).Infof("Directory %s is already on a shared mount", path) - return nil - } - - klog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) - // mount --bind /var/lib/kubelet /var/lib/kubelet - if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_BIND, "" /*data*/); err != nil { - return fmt.Errorf("failed to bind-mount %s: %v", path, err) - } - - // mount --make-rshared /var/lib/kubelet - if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_SHARED|syscall.MS_REC, "" /*data*/); err != nil { - return fmt.Errorf("failed to make %s rshared: %v", path, err) - } - - return nil -} - -// getSELinuxSupport is common implementation of GetSELinuxSupport on Linux. -func getSELinuxSupport(path string, mountInfoFilename string) (bool, error) { - info, err := findMountInfo(path, mountInfoFilename) - if err != nil { - return false, err - } - - // "seclabel" can be both in mount options and super options. 
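For orientation, a compact sketch of the mountinfo layout that parseMountInfo above walks: fixed positional fields, then zero or more optional fields terminated by a lone "-", then filesystem type, mount source and super options. The struct, helper and sample line are illustrative.

package main

import (
    "fmt"
    "strings"
)

// mountInfoLine captures the subset of mountinfo fields used in this sketch.
type mountInfoLine struct {
    mountPoint     string
    optionalFields []string
    fsType         string
    source         string
    superOptions   []string
}

func parseMountInfoLine(line string) (mountInfoLine, error) {
    fields := strings.Fields(line)
    if len(fields) < 10 {
        return mountInfoLine{}, fmt.Errorf("too few fields: %q", line)
    }
    info := mountInfoLine{mountPoint: fields[4]}
    // Optional fields run from index 6 until the "-" separator.
    i := 6
    for ; i < len(fields) && fields[i] != "-"; i++ {
        info.optionalFields = append(info.optionalFields, fields[i])
    }
    i++ // skip the "-" separator
    if len(fields)-i < 3 {
        return mountInfoLine{}, fmt.Errorf("missing fstype/source/super options: %q", line)
    }
    info.fsType = fields[i]
    info.source = fields[i+1]
    info.superOptions = strings.Split(fields[i+2], ",")
    return info, nil
}

func main() {
    line := "36 25 0:32 / /tmp rw,nosuid shared:15 - tmpfs tmpfs rw,seclabel"
    info, err := parseMountInfoLine(line)
    fmt.Printf("%+v err=%v\n", info, err)
}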
- for _, opt := range info.superOptions { - if opt == "seclabel" { - return true, nil - } - } - for _, opt := range info.mountOptions { - if opt == "seclabel" { - return true, nil - } - } - return false, nil -} - -// PrepareSafeSubpath safely creates a new subpath. -func (mounter *Mounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) { - newHostPath, err = doBindSubPath(mounter, subPath) - - // There is no action when the container starts. Bind-mount will be cleaned - // when container stops by CleanSubPaths. - cleanupAction = nil - return newHostPath, cleanupAction, err -} - -// This implementation is shared between Linux and NsEnterMounter -func safeOpenSubPath(mounter Interface, subpath Subpath) (int, error) { - if !PathWithinBase(subpath.Path, subpath.VolumePath) { - return -1, fmt.Errorf("subpath %q not within volume path %q", subpath.Path, subpath.VolumePath) - } - fd, err := doSafeOpen(subpath.Path, subpath.VolumePath) - if err != nil { - return -1, fmt.Errorf("error opening subpath %v: %v", subpath.Path, err) - } - return fd, nil -} - -// prepareSubpathTarget creates target for bind-mount of subpath. It returns -// "true" when the target already exists and something is mounted there. -// Given Subpath must have all paths with already resolved symlinks and with -// paths relevant to kubelet (when it runs in a container). -// This function is called also by NsEnterMounter. It works because -// /var/lib/kubelet is mounted from the host into the container with Kubelet as -// /var/lib/kubelet too. -func prepareSubpathTarget(mounter Interface, subpath Subpath) (bool, string, error) { - // Early check for already bind-mounted subpath. - bindPathTarget := getSubpathBindTarget(subpath) - notMount, err := IsNotMountPoint(mounter, bindPathTarget) - if err != nil { - if !os.IsNotExist(err) { - return false, "", fmt.Errorf("error checking path %s for mount: %s", bindPathTarget, err) - } - // Ignore ErrorNotExist: the file/directory will be created below if it does not exist yet. - notMount = true - } - if !notMount { - // It's already mounted - klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) - return true, bindPathTarget, nil - } - - // bindPathTarget is in /var/lib/kubelet and thus reachable without any - // translation even to containerized kubelet. - bindParent := filepath.Dir(bindPathTarget) - err = os.MkdirAll(bindParent, 0750) - if err != nil && !os.IsExist(err) { - return false, "", fmt.Errorf("error creating directory %s: %s", bindParent, err) - } - - t, err := os.Lstat(subpath.Path) - if err != nil { - return false, "", fmt.Errorf("lstat %s failed: %s", subpath.Path, err) - } - - if t.Mode()&os.ModeDir > 0 { - if err = os.Mkdir(bindPathTarget, 0750); err != nil && !os.IsExist(err) { - return false, "", fmt.Errorf("error creating directory %s: %s", bindPathTarget, err) - } - } else { - // "/bin/touch ". - // A file is enough for all possible targets (symlink, device, pipe, - // socket, ...), bind-mounting them into a file correctly changes type - // of the target file. - if err = ioutil.WriteFile(bindPathTarget, []byte{}, 0640); err != nil { - return false, "", fmt.Errorf("error creating file %s: %s", bindPathTarget, err) - } - } - return false, bindPathTarget, nil -} - -func getSubpathBindTarget(subpath Subpath) string { - // containerName is DNS label, i.e. safe as a directory name. 
- return filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName, strconv.Itoa(subpath.VolumeMountIndex)) -} - -func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err error) { - // Linux, kubelet runs on the host: - // - safely open the subpath - // - bind-mount /proc//fd/ to subpath target - // User can't change /proc//fd/ to point to a bad place. - - // Evaluate all symlinks here once for all subsequent functions. - newVolumePath, err := filepath.EvalSymlinks(subpath.VolumePath) - if err != nil { - return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.VolumePath, err) - } - newPath, err := filepath.EvalSymlinks(subpath.Path) - if err != nil { - return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) - } - klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) - subpath.VolumePath = newVolumePath - subpath.Path = newPath - - fd, err := safeOpenSubPath(mounter, subpath) - if err != nil { - return "", err - } - defer syscall.Close(fd) - - alreadyMounted, bindPathTarget, err := prepareSubpathTarget(mounter, subpath) - if err != nil { - return "", err - } - if alreadyMounted { - return bindPathTarget, nil - } - - success := false - defer func() { - // Cleanup subpath on error - if !success { - klog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) - if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) - } - } - }() - - kubeletPid := os.Getpid() - mountSource := fmt.Sprintf("/proc/%d/fd/%v", kubeletPid, fd) - - // Do the bind mount - options := []string{"bind"} - klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) - if err = mounter.Mount(mountSource, bindPathTarget, "" /*fstype*/, options); err != nil { - return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) - } - success = true - - klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) - return bindPathTarget, nil -} - -// CleanSubPaths cleans up the subpath mounts for the volume in the pod directory. 
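To make the /proc-based bind mount in doBindSubPath above concrete, here is a heavily simplified, Linux-only sketch (requires privileges; all paths are illustrative): the already-opened and verified descriptor is re-exposed via /proc/<pid>/fd, so the kernel mounts exactly the object that was opened even if the original path is swapped afterwards.

package main

import (
    "fmt"
    "os"

    "golang.org/x/sys/unix"
)

// bindMountFD bind-mounts whatever fd currently refers to onto target.
// Going through /proc/<pid>/fd/<fd> means a later rename or relink of the
// original path cannot redirect the mount.
func bindMountFD(fd int, target string) error {
    source := fmt.Sprintf("/proc/%d/fd/%d", os.Getpid(), fd)
    return unix.Mount(source, target, "" /*fstype*/, unix.MS_BIND, "" /*data*/)
}

func main() {
    // Illustrative paths; both must exist and the process must be privileged.
    fd, err := unix.Open("/var/lib/example/volume/sub", unix.O_RDONLY|unix.O_NOFOLLOW, 0)
    if err != nil {
        panic(err)
    }
    defer unix.Close(fd)

    if err := bindMountFD(fd, "/var/lib/example/target"); err != nil {
        panic(err)
    }
}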
-func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { - return doCleanSubPaths(mounter, podDir, volumeName) -} - -// This implementation is shared between Linux and NsEnterMounter -func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error { - // scan /var/lib/kubelet/pods//volume-subpaths//* - subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName) - klog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) - - containerDirs, err := ioutil.ReadDir(subPathDir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return fmt.Errorf("error reading %s: %s", subPathDir, err) - } - - for _, containerDir := range containerDirs { - if !containerDir.IsDir() { - klog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) - continue - } - klog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) - - // scan /var/lib/kubelet/pods//volume-subpaths///* - fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name()) - subPaths, err := ioutil.ReadDir(fullContainerDirPath) - if err != nil { - return fmt.Errorf("error reading %s: %s", fullContainerDirPath, err) - } - for _, subPath := range subPaths { - if err = doCleanSubPath(mounter, fullContainerDirPath, subPath.Name()); err != nil { - return err - } - } - // Whole container has been processed, remove its directory. - if err := os.Remove(fullContainerDirPath); err != nil { - return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err) - } - klog.V(5).Infof("Removed %s", fullContainerDirPath) - } - // Whole pod volume subpaths have been cleaned up, remove its subpath directory. - if err := os.Remove(subPathDir); err != nil { - return fmt.Errorf("error deleting %s: %s", subPathDir, err) - } - klog.V(5).Infof("Removed %s", subPathDir) - - // Remove entire subpath directory if it's the last one - podSubPathDir := filepath.Join(podDir, containerSubPathDirectoryName) - if err := os.Remove(podSubPathDir); err != nil && !os.IsExist(err) { - return fmt.Errorf("error deleting %s: %s", podSubPathDir, err) - } - klog.V(5).Infof("Removed %s", podSubPathDir) - return nil -} - -// doCleanSubPath tears down the single subpath bind mount -func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string) error { - // process /var/lib/kubelet/pods//volume-subpaths/// - klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) - fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex) - notMnt, err := IsNotMountPoint(mounter, fullSubPath) - if err != nil { - return fmt.Errorf("error checking %s for mount: %s", fullSubPath, err) - } - // Unmount it - if !notMnt { - if err = mounter.Unmount(fullSubPath); err != nil { - return fmt.Errorf("error unmounting %s: %s", fullSubPath, err) - } - klog.V(5).Infof("Unmounted %s", fullSubPath) - } - // Remove it *non*-recursively, just in case there were some hiccups. 
- if err = os.Remove(fullSubPath); err != nil { - return fmt.Errorf("error deleting %s: %s", fullSubPath, err) - } - klog.V(5).Infof("Removed %s", fullSubPath) - return nil -} - -// cleanSubPath will teardown the subpath bind mount and any remove any directories if empty -func cleanSubPath(mounter Interface, subpath Subpath) error { - containerDir := filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName) - - // Clean subdir bindmount - if err := doCleanSubPath(mounter, containerDir, strconv.Itoa(subpath.VolumeMountIndex)); err != nil && !os.IsNotExist(err) { - return err - } - - // Recursively remove directories if empty - if err := removeEmptyDirs(subpath.PodDir, containerDir); err != nil { - return err - } - - return nil -} - -// removeEmptyDirs works backwards from endDir to baseDir and removes each directory -// if it is empty. It stops once it encounters a directory that has content -func removeEmptyDirs(baseDir, endDir string) error { - if !PathWithinBase(endDir, baseDir) { - return fmt.Errorf("endDir %q is not within baseDir %q", endDir, baseDir) - } - - for curDir := endDir; curDir != baseDir; curDir = filepath.Dir(curDir) { - s, err := os.Stat(curDir) - if err != nil { - if os.IsNotExist(err) { - klog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) - continue - } - return fmt.Errorf("error stat %q: %v", curDir, err) - } - if !s.IsDir() { - return fmt.Errorf("path %q not a directory", curDir) - } - - err = os.Remove(curDir) - if os.IsExist(err) { - klog.V(5).Infof("Directory %q not empty, not removing", curDir) - break - } else if err != nil { - return fmt.Errorf("error removing directory %q: %v", curDir, err) - } - klog.V(5).Infof("Removed directory %q", curDir) - } - return nil -} - -// SafeMakeDir safely creates a subdirectory at the specified path base -func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode) error { - realBase, err := filepath.EvalSymlinks(base) - if err != nil { - return fmt.Errorf("error resolving symlinks in %s: %s", base, err) - } - - realFullPath := filepath.Join(realBase, subdir) - - return doSafeMakeDir(realFullPath, realBase, perm) -} - -// GetMountRefs finds all of the mount references to the source. -func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(pathname); os.IsNotExist(err) { - return []string{}, nil - } else if err != nil { - return nil, err - } - realpath, err := filepath.EvalSymlinks(pathname) - if err != nil { - return nil, err - } - return searchMountPoints(realpath, procMountInfoPath) -} - -// GetSELinuxSupport is common implementation of GetSELinuxSupport on Linux. -func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { - return getSELinuxSupport(pathname, procMountInfoPath) -} - -// GetFSGroup returns the filesystem group for the path name. -func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { - realpath, err := filepath.EvalSymlinks(pathname) - if err != nil { - return 0, err - } - return getFSGroup(realpath) -} - -// GetMode returns the unix filesystem mode for the path. 
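A standalone sketch of the bottom-up cleanup used by removeEmptyDirs above: starting from the deepest directory, remove each level and stop at the first directory that is not empty (the vendored code additionally verifies endDir lies within baseDir first). The helper name and temp-directory usage are illustrative.

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
)

// removeEmptyParents removes endDir and each of its parents, stopping at the
// first directory that is not empty or when baseDir is reached.
func removeEmptyParents(baseDir, endDir string) error {
    for cur := endDir; cur != baseDir; cur = filepath.Dir(cur) {
        err := os.Remove(cur)
        if err == nil {
            continue // empty directory removed, move one level up
        }
        if os.IsNotExist(err) {
            continue // already gone, keep walking up
        }
        if os.IsExist(err) {
            return nil // directory not empty, stop here
        }
        return fmt.Errorf("removing %q: %v", cur, err)
    }
    return nil
}

func main() {
    base, err := ioutil.TempDir("", "cleanup")
    if err != nil {
        panic(err)
    }
    deep := filepath.Join(base, "a", "b", "c")
    if err := os.MkdirAll(deep, 0750); err != nil {
        panic(err)
    }
    if err := removeEmptyParents(base, deep); err != nil {
        panic(err)
    }
    _, statErr := os.Stat(filepath.Join(base, "a"))
    fmt.Println("a removed:", os.IsNotExist(statErr))
}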
-func (mounter *Mounter) GetMode(pathname string) (os.FileMode, error) { - return getMode(pathname) -} - -// This implementation is shared between Linux and NsEnterMounter -func getFSGroup(pathname string) (int64, error) { - info, err := os.Stat(pathname) - if err != nil { - return 0, err - } - return int64(info.Sys().(*syscall.Stat_t).Gid), nil -} - -// This implementation is shared between Linux and NsEnterMounter -func getMode(pathname string) (os.FileMode, error) { - info, err := os.Stat(pathname) - if err != nil { - return 0, err - } - return info.Mode(), nil -} - -// This implementation is shared between Linux and NsEnterMounter. Both pathname -// and base must be either already resolved symlinks or that will be resolved in -// kubelet's mount namespace (in case it runs containerized). -func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - klog.V(4).Infof("Creating directory %q within base %q", pathname, base) - - if !PathWithinBase(pathname, base) { - return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) - } - - // Quick check if the directory already exists - s, err := os.Stat(pathname) - if err == nil { - // Path exists - if s.IsDir() { - // The directory already exists. It can be outside of the parent, - // but there is no race-proof check. - klog.V(4).Infof("Directory %s already exists", pathname) - return nil - } - return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} - } - - // Find all existing directories - existingPath, toCreate, err := findExistingPrefix(base, pathname) - if err != nil { - return fmt.Errorf("error opening directory %s: %s", pathname, err) - } - // Ensure the existing directory is inside allowed base - fullExistingPath, err := filepath.EvalSymlinks(existingPath) - if err != nil { - return fmt.Errorf("error opening directory %s: %s", existingPath, err) - } - if !PathWithinBase(fullExistingPath, base) { - return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err) - } - - klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) - parentFD, err := doSafeOpen(fullExistingPath, base) - if err != nil { - return fmt.Errorf("cannot open directory %s: %s", existingPath, err) - } - childFD := -1 - defer func() { - if parentFD != -1 { - if err = syscall.Close(parentFD); err != nil { - klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) - } - } - if childFD != -1 { - if err = syscall.Close(childFD); err != nil { - klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err) - } - } - }() - - currentPath := fullExistingPath - // create the directories one by one, making sure nobody can change - // created directory into symlink. - for _, dir := range toCreate { - currentPath = filepath.Join(currentPath, dir) - klog.V(4).Infof("Creating %s", dir) - err = syscall.Mkdirat(parentFD, currentPath, uint32(perm)) - if err != nil { - return fmt.Errorf("cannot create directory %s: %s", currentPath, err) - } - // Dive into the created directory - childFD, err := syscall.Openat(parentFD, dir, nofollowFlags, 0) - if err != nil { - return fmt.Errorf("cannot open %s: %s", currentPath, err) - } - // We can be sure that childFD is safe to use. It could be changed - // by user after Mkdirat() and before Openat(), however: - // - it could not be changed to symlink - we use nofollowFlags - // - it could be changed to a file (or device, pipe, socket, ...) 
- // but either subsequent Mkdirat() fails or we mount this file - // to user's container. Security is no violated in both cases - // and user either gets error or the file that it can already access. - - if err = syscall.Close(parentFD); err != nil { - klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) - } - parentFD = childFD - childFD = -1 - } - - // Everything was created. mkdirat(..., perm) above was affected by current - // umask and we must apply the right permissions to the last directory - // (that's the one that will be available to the container as subpath) - // so user can read/write it. This is the behavior of previous code. - // TODO: chmod all created directories, not just the last one. - // parentFD is the last created directory. - - // Translate perm (os.FileMode) to uint32 that fchmod() expects - kernelPerm := uint32(perm & os.ModePerm) - if perm&os.ModeSetgid > 0 { - kernelPerm |= syscall.S_ISGID - } - if perm&os.ModeSetuid > 0 { - kernelPerm |= syscall.S_ISUID - } - if perm&os.ModeSticky > 0 { - kernelPerm |= syscall.S_ISVTX - } - if err = syscall.Fchmod(parentFD, kernelPerm); err != nil { - return fmt.Errorf("chmod %q failed: %s", currentPath, err) - } - return nil -} - -// findExistingPrefix finds prefix of pathname that exists. In addition, it -// returns list of remaining directories that don't exist yet. -func findExistingPrefix(base, pathname string) (string, []string, error) { - rel, err := filepath.Rel(base, pathname) - if err != nil { - return base, nil, err - } - dirs := strings.Split(rel, string(filepath.Separator)) - - // Do OpenAt in a loop to find the first non-existing dir. Resolve symlinks. - // This should be faster than looping through all dirs and calling os.Stat() - // on each of them, as the symlinks are resolved only once with OpenAt(). - currentPath := base - fd, err := syscall.Open(currentPath, syscall.O_RDONLY, 0) - if err != nil { - return pathname, nil, fmt.Errorf("error opening %s: %s", currentPath, err) - } - defer func() { - if err = syscall.Close(fd); err != nil { - klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) - } - }() - for i, dir := range dirs { - // Using O_PATH here will prevent hangs in case user replaces directory with - // fifo - childFD, err := syscall.Openat(fd, dir, unix.O_PATH, 0) - if err != nil { - if os.IsNotExist(err) { - return currentPath, dirs[i:], nil - } - return base, nil, err - } - if err = syscall.Close(fd); err != nil { - klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) - } - fd = childFD - currentPath = filepath.Join(currentPath, dir) - } - return pathname, []string{}, nil -} - -// This implementation is shared between Linux and NsEnterMounter -// Open path and return its fd. -// Symlinks are disallowed (pathname must already resolve symlinks), -// and the path must be within the base directory. -func doSafeOpen(pathname string, base string) (int, error) { - pathname = filepath.Clean(pathname) - base = filepath.Clean(base) - - // Calculate segments to follow - subpath, err := filepath.Rel(base, pathname) - if err != nil { - return -1, err - } - segments := strings.Split(subpath, string(filepath.Separator)) - - // Assumption: base is the only directory that we have under control. - // Base dir is not allowed to be a symlink. 
- parentFD, err := syscall.Open(base, nofollowFlags, 0) - if err != nil { - return -1, fmt.Errorf("cannot open directory %s: %s", base, err) - } - defer func() { - if parentFD != -1 { - if err = syscall.Close(parentFD); err != nil { - klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err) - } - } - }() - - childFD := -1 - defer func() { - if childFD != -1 { - if err = syscall.Close(childFD); err != nil { - klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err) - } - } - }() - - currentPath := base - - // Follow the segments one by one using openat() to make - // sure the user cannot change already existing directories into symlinks. - for _, seg := range segments { - currentPath = filepath.Join(currentPath, seg) - if !PathWithinBase(currentPath, base) { - return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base) - } - - klog.V(5).Infof("Opening path %s", currentPath) - childFD, err = syscall.Openat(parentFD, seg, openFDFlags, 0) - if err != nil { - return -1, fmt.Errorf("cannot open %s: %s", currentPath, err) - } - - var deviceStat unix.Stat_t - err := unix.Fstat(childFD, &deviceStat) - if err != nil { - return -1, fmt.Errorf("Error running fstat on %s with %v", currentPath, err) - } - fileFmt := deviceStat.Mode & syscall.S_IFMT - if fileFmt == syscall.S_IFLNK { - return -1, fmt.Errorf("Unexpected symlink found %s", currentPath) - } - - // Close parentFD - if err = syscall.Close(parentFD); err != nil { - return -1, fmt.Errorf("closing fd for %q failed: %v", filepath.Dir(currentPath), err) - } - // Set child to new parent - parentFD = childFD - childFD = -1 - } - - // We made it to the end, return this fd, don't close it - finalFD := parentFD - parentFD = -1 - - return finalFD, nil -} - -// searchMountPoints finds all mount references to the source, returns a list of -// mountpoints. -// This function assumes source cannot be device. -// Some filesystems may share a source name, e.g. tmpfs. And for bind mounting, -// it's possible to mount a non-root path of a filesystem, so we need to use -// root path and major:minor to represent mount source uniquely. -// This implementation is shared between Linux and NsEnterMounter -func searchMountPoints(hostSource, mountInfoPath string) ([]string, error) { - mis, err := parseMountInfo(mountInfoPath) - if err != nil { - return nil, err - } - - mountID := 0 - rootPath := "" - majorMinor := "" - - // Finding the underlying root path and major:minor if possible. - // We need search in backward order because it's possible for later mounts - // to overlap earlier mounts. - for i := len(mis) - 1; i >= 0; i-- { - if hostSource == mis[i].mountPoint || PathWithinBase(hostSource, mis[i].mountPoint) { - // If it's a mount point or path under a mount point. - mountID = mis[i].id - rootPath = filepath.Join(mis[i].root, strings.TrimPrefix(hostSource, mis[i].mountPoint)) - majorMinor = mis[i].majorMinor - break - } - } - - if rootPath == "" || majorMinor == "" { - return nil, fmt.Errorf("failed to get root path and major:minor for %s", hostSource) - } - - var refs []string - for i := range mis { - if mis[i].id == mountID { - // Ignore mount entry for mount source itself. 
- continue - } - if mis[i].root == rootPath && mis[i].majorMinor == majorMinor { - refs = append(refs, mis[i].mountPoint) - } - } - - return refs, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_unsupported.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_unsupported.go deleted file mode 100644 index cab7aecc3ba..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_unsupported.go +++ /dev/null @@ -1,162 +0,0 @@ -// +build !linux,!windows,!providerless - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mount - -import ( - "errors" - "os" -) - -// Mounter provides the default unsupported implementation of mount.Interface -type Mounter struct { - mounterPath string -} - -var errUnsupported = errors.New(" util/mount on this platform is not supported") - -// New returns a mount.Interface for the current system. -// It provides options to override the default mounter behavior. -// mounterPath allows using an alternative to `/bin/mount` for mounting. -func New(mounterPath string) Interface { - return &Mounter{ - mounterPath: mounterPath, - } -} - -// Mount returns an error -func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { - return errUnsupported -} - -// Unmount returns an error -func (mounter *Mounter) Unmount(target string) error { - return errUnsupported -} - -// List returns an error -func (mounter *Mounter) List() ([]MntPoint, error) { - return []MntPoint{}, errUnsupported -} - -// IsMountPointMatch returns an error -func (mounter *Mounter) IsMountPointMatch(mp MntPoint, dir string) bool { - return (mp.Path == dir) -} - -// IsNotMountPoint returns an error -func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { - return IsNotMountPoint(mounter, dir) -} - -// IsLikelyNotMountPoint returns an error -func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { - return true, errUnsupported -} - -// GetDeviceNameFromMount returns an error -func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { - return "", errUnsupported -} - -func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { - return "", errUnsupported -} - -// DeviceOpened returns an error -func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { - return false, errUnsupported -} - -// PathIsDevice returns an error -func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) { - return true, errUnsupported -} - -// MakeRShared returns an error -func (mounter *Mounter) MakeRShared(path string) error { - return errUnsupported -} - -func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - return mounter.Interface.Mount(source, target, fstype, options) -} - -func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) 
(bool, error) { - return true, errUnsupported -} - -// GetFileType returns an error -func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { - return FileType("fake"), errUnsupported -} - -// MakeDir returns an error -func (mounter *Mounter) MakeDir(pathname string) error { - return errUnsupported -} - -// MakeFile returns an error -func (mounter *Mounter) MakeFile(pathname string) error { - return errUnsupported -} - -// ExistsPath returns an error -func (mounter *Mounter) ExistsPath(pathname string) (bool, error) { - return true, errors.New("not implemented") -} - -// EvalHostSymlinks returns an error -func (mounter *Mounter) EvalHostSymlinks(pathname string) (string, error) { - return "", errUnsupported -} - -// PrepareSafeSubpath returns an error -func (mounter *Mounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) { - return subPath.Path, nil, errUnsupported -} - -// CleanSubPaths returns an error -func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { - return errUnsupported -} - -// SafeMakeDir returns an error -func (mounter *Mounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error { - return errUnsupported -} - -// GetMountRefs returns an error -func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - return nil, errors.New("not implemented") -} - -// GetFSGroup returns an error -func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { - return -1, errors.New("not implemented") -} - -// GetSELinuxSupport returns an error -func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { - return false, errors.New("not implemented") -} - -// GetMode returns an error -func (mounter *Mounter) GetMode(pathname string) (os.FileMode, error) { - return 0, errors.New("not implemented") -} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_windows.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_windows.go deleted file mode 100644 index 18fb28417c7..00000000000 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_windows.go +++ /dev/null @@ -1,613 +0,0 @@ -// +build windows,!providerless - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mount - -import ( - "fmt" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - - "k8s.io/klog" - - utilpath "k8s.io/utils/path" -) - -// Mounter provides the default implementation of mount.Interface -// for the windows platform. This implementation assumes that the -// kubelet is running in the host's root mount namespace. -type Mounter struct { - mounterPath string -} - -// New returns a mount.Interface for the current system. -// It provides options to override the default mounter behavior. -// mounterPath allows using an alternative to `/bin/mount` for mounting. 
-func New(mounterPath string) Interface { - return &Mounter{ - mounterPath: mounterPath, - } -} - -// Mount : mounts source to target as NTFS with given options. -func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { - target = normalizeWindowsPath(target) - - if source == "tmpfs" { - klog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options) - return os.MkdirAll(target, 0755) - } - - parentDir := filepath.Dir(target) - if err := os.MkdirAll(parentDir, 0755); err != nil { - return err - } - - klog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount", - options, source, target, fstype) - bindSource := "" - - // tell it's going to mount azure disk or azure file according to options - if bind, _, _ := isBind(options); bind { - // mount azure disk - bindSource = normalizeWindowsPath(source) - } else { - if len(options) < 2 { - klog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting", - options, len(options), source, target) - return nil - } - - // currently only cifs mount is supported - if strings.ToLower(fstype) != "cifs" { - return fmt.Errorf("azureMount: only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options) - } - - bindSource = source - - // use PowerShell Environment Variables to store user input string to prevent command line injection - // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1 - cmdLine := fmt.Sprintf(`$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` + - `;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` + - `;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential`) - - cmd := exec.Command("powershell", "/c", cmdLine) - cmd.Env = append(os.Environ(), - fmt.Sprintf("smbuser=%s", options[0]), - fmt.Sprintf("smbpassword=%s", options[1]), - fmt.Sprintf("smbremotepath=%s", source)) - if output, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("azureMount: SmbGlobalMapping failed: %v, only SMB mount is supported now, output: %q", err, string(output)) - } - } - - if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil { - klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output)) - return err - } - - return nil -} - -// Unmount unmounts the target. -func (mounter *Mounter) Unmount(target string) error { - klog.V(4).Infof("azureMount: Unmount target (%q)", target) - target = normalizeWindowsPath(target) - if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil { - klog.Errorf("rmdir failed: %v, output: %q", err, string(output)) - return err - } - return nil -} - -// List returns a list of all mounted filesystems. todo -func (mounter *Mounter) List() ([]MntPoint, error) { - return []MntPoint{}, nil -} - -// IsMountPointMatch determines if the mountpoint matches the dir -func (mounter *Mounter) IsMountPointMatch(mp MntPoint, dir string) bool { - return mp.Path == dir -} - -// IsNotMountPoint determines if a directory is a mountpoint. 
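The SMB mapping above shows a pattern worth noting: user-supplied values reach PowerShell through environment variables instead of being interpolated into the command line, which avoids command injection. A simplified, Windows-only sketch of that pattern; the wrapper name and sample arguments are illustrative.

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// runWithSecrets invokes a fixed PowerShell script that reads its inputs from
// environment variables, keeping untrusted values out of the command line.
func runWithSecrets(user, password, remotePath string) error {
    script := `$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force;` +
        `$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord;` +
        `New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential`

    cmd := exec.Command("powershell", "/c", script)
    cmd.Env = append(os.Environ(),
        "smbuser="+user,
        "smbpassword="+password,
        "smbremotepath="+remotePath)
    out, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("SmbGlobalMapping failed: %v, output: %q", err, string(out))
    }
    return nil
}

func main() {
    // Illustrative values only.
    if err := runWithSecrets(`AZURE\user`, "secret", `\\server\share`); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}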
-func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { - return IsNotMountPoint(mounter, dir) -} - -// IsLikelyNotMountPoint determines if a directory is not a mountpoint. -func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { - stat, err := os.Lstat(file) - if err != nil { - return true, err - } - // If current file is a symlink, then it is a mountpoint. - if stat.Mode()&os.ModeSymlink != 0 { - target, err := os.Readlink(file) - if err != nil { - return true, fmt.Errorf("readlink error: %v", err) - } - exists, err := mounter.ExistsPath(target) - if err != nil { - return true, err - } - return !exists, nil - } - - return true, nil -} - -// GetDeviceNameFromMount given a mnt point, find the device -func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { - return getDeviceNameFromMount(mounter, mountPath, pluginDir) -} - -// getDeviceNameFromMount find the device(drive) name in which -// the mount path reference should match the given plugin directory. In case no mount path reference -// matches, returns the volume name taken from its given mountPath -func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { - refs, err := mounter.GetMountRefs(mountPath) - if err != nil { - klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) - return "", err - } - if len(refs) == 0 { - return "", fmt.Errorf("directory %s is not mounted", mountPath) - } - basemountPath := normalizeWindowsPath(path.Join(pluginDir, MountsInGlobalPDPath)) - for _, ref := range refs { - if strings.Contains(ref, basemountPath) { - volumeID, err := filepath.Rel(normalizeWindowsPath(basemountPath), ref) - if err != nil { - klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) - return "", err - } - return volumeID, nil - } - } - - return path.Base(mountPath), nil -} - -// DeviceOpened determines if the device is in use elsewhere -func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { - return false, nil -} - -// PathIsDevice determines if a path is a device. -func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) { - return false, nil -} - -// MakeRShared checks that given path is on a mount with 'rshared' mount -// propagation. Empty implementation here. 
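A tiny sketch of the Windows mount-point heuristic described above: since mounts are created as directory symlinks (mklink /D), a path that os.Lstat reports as a symlink is treated as a likely mount point. The helper name and path are illustrative.

package main

import (
    "fmt"
    "os"
)

// isLikelyMountPoint reports whether path looks like a mount point under the
// symlink-based mounting scheme, i.e. whether the path itself is a symlink.
func isLikelyMountPoint(path string) (bool, error) {
    fi, err := os.Lstat(path)
    if err != nil {
        return false, err
    }
    return fi.Mode()&os.ModeSymlink != 0, nil
}

func main() {
    likely, err := isLikelyMountPoint(`C:\var\lib\kubelet\example`) // illustrative path
    fmt.Println(likely, err)
}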
-func (mounter *Mounter) MakeRShared(path string) error { - return nil -} - -// GetFileType checks for sockets/block/character devices -func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { - return getFileType(pathname) -} - -// MakeDir creates a new directory -func (mounter *Mounter) MakeDir(pathname string) error { - err := os.MkdirAll(pathname, os.FileMode(0755)) - if err != nil { - if !os.IsExist(err) { - return err - } - } - return nil -} - -// MakeFile creates an empty file -func (mounter *Mounter) MakeFile(pathname string) error { - f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644)) - defer f.Close() - if err != nil { - if !os.IsExist(err) { - return err - } - } - return nil -} - -// ExistsPath checks whether the path exists -func (mounter *Mounter) ExistsPath(pathname string) (bool, error) { - return utilpath.Exists(utilpath.CheckFollowSymlink, pathname) -} - -// EvalHostSymlinks returns the path name after evaluating symlinks -func (mounter *Mounter) EvalHostSymlinks(pathname string) (string, error) { - return filepath.EvalSymlinks(pathname) -} - -// check whether hostPath is within volume path -// this func will lock all intermediate subpath directories, need to close handle outside of this func after container started -func lockAndCheckSubPath(volumePath, hostPath string) ([]uintptr, error) { - if len(volumePath) == 0 || len(hostPath) == 0 { - return []uintptr{}, nil - } - - finalSubPath, err := filepath.EvalSymlinks(hostPath) - if err != nil { - return []uintptr{}, fmt.Errorf("cannot read link %s: %s", hostPath, err) - } - finalVolumePath, err := filepath.EvalSymlinks(volumePath) - if err != nil { - return []uintptr{}, fmt.Errorf("cannot read link %s: %s", volumePath, err) - } - - return lockAndCheckSubPathWithoutSymlink(finalVolumePath, finalSubPath) -} - -// lock all intermediate subPath directories and check they are all within volumePath -// volumePath & subPath should not contain any symlink, otherwise it will return error -func lockAndCheckSubPathWithoutSymlink(volumePath, subPath string) ([]uintptr, error) { - if len(volumePath) == 0 || len(subPath) == 0 { - return []uintptr{}, nil - } - - // get relative path to volumePath - relSubPath, err := filepath.Rel(volumePath, subPath) - if err != nil { - return []uintptr{}, fmt.Errorf("Rel(%s, %s) error: %v", volumePath, subPath, err) - } - if startsWithBackstep(relSubPath) { - return []uintptr{}, fmt.Errorf("SubPath %q not within volume path %q", subPath, volumePath) - } - - if relSubPath == "." 
{ - // volumePath and subPath are equal - return []uintptr{}, nil - } - - fileHandles := []uintptr{} - var errorResult error - - currentFullPath := volumePath - dirs := strings.Split(relSubPath, string(os.PathSeparator)) - for _, dir := range dirs { - // lock intermediate subPath directory first - currentFullPath = filepath.Join(currentFullPath, dir) - handle, err := lockPath(currentFullPath) - if err != nil { - errorResult = fmt.Errorf("cannot lock path %s: %s", currentFullPath, err) - break - } - fileHandles = append(fileHandles, handle) - - // make sure intermediate subPath directory does not contain symlink any more - stat, err := os.Lstat(currentFullPath) - if err != nil { - errorResult = fmt.Errorf("Lstat(%q) error: %v", currentFullPath, err) - break - } - if stat.Mode()&os.ModeSymlink != 0 { - errorResult = fmt.Errorf("subpath %q is an unexpected symlink after EvalSymlinks", currentFullPath) - break - } - - if !PathWithinBase(currentFullPath, volumePath) { - errorResult = fmt.Errorf("SubPath %q not within volume path %q", currentFullPath, volumePath) - break - } - } - - return fileHandles, errorResult -} - -// unlockPath unlock directories -func unlockPath(fileHandles []uintptr) { - if fileHandles != nil { - for _, handle := range fileHandles { - syscall.CloseHandle(syscall.Handle(handle)) - } - } -} - -// lockPath locks a directory or symlink, return handle, exec "syscall.CloseHandle(handle)" to unlock the path -func lockPath(path string) (uintptr, error) { - if len(path) == 0 { - return uintptr(syscall.InvalidHandle), syscall.ERROR_FILE_NOT_FOUND - } - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return uintptr(syscall.InvalidHandle), err - } - access := uint32(syscall.GENERIC_READ) - sharemode := uint32(syscall.FILE_SHARE_READ) - createmode := uint32(syscall.OPEN_EXISTING) - flags := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS | syscall.FILE_FLAG_OPEN_REPARSE_POINT) - fd, err := syscall.CreateFile(pathp, access, sharemode, nil, createmode, flags, 0) - return uintptr(fd), err -} - -// PrepareSafeSubpath locks all directories in subPath and check they're not symlinks. 
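The containment checks above reduce to: a candidate path lies inside the base only if its filepath.Rel form does not start with "..". A minimal, portable sketch of that lexical check (symlinks must be resolved beforehand, as the vendored code does); the helper name and paths are illustrative.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// pathWithin reports whether path is base itself or located underneath it,
// judged purely lexically (the caller must have resolved symlinks already).
func pathWithin(base, path string) bool {
    rel, err := filepath.Rel(base, path)
    if err != nil {
        return false
    }
    if rel == "." {
        return true // path and base are the same directory
    }
    // A relative path starting with ".." escapes the base.
    return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
    base := "/var/lib/kubelet/pods/uid/volumes/vol" // illustrative
    fmt.Println(pathWithin(base, base+"/data/sub")) // true
    fmt.Println(pathWithin(base, "/etc/passwd"))    // false
}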
-func (mounter *Mounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) { - handles, err := lockAndCheckSubPath(subPath.VolumePath, subPath.Path) - - // Unlock the directories when the container starts - cleanupAction = func() { - unlockPath(handles) - } - return subPath.Path, cleanupAction, err -} - -// CleanSubPaths no bind-mounts for subpaths are necessary on Windows -func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { - return nil -} - -func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - // Try to mount the disk - klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) - - if err := ValidateDiskNumber(source); err != nil { - klog.Errorf("diskMount: formatAndMount failed, err: %v", err) - return err - } - - if len(fstype) == 0 { - // Use 'NTFS' as the default - fstype = "NTFS" - } - - // format disk if it is unformatted(raw) - cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru"+ - " | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", source, fstype) - if output, err := mounter.Exec.Run("powershell", "/c", cmd); err != nil { - return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output)) - } - klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) - - driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec) - if err != nil { - return err - } - driverPath := driveLetter + ":" - target = normalizeWindowsPath(target) - klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) - if output, err := mounter.Exec.Run("cmd", "/c", "mklink", "/D", target, driverPath); err != nil { - klog.Errorf("mklink failed: %v, output: %q", err, string(output)) - return err - } - return nil -} - -func normalizeWindowsPath(path string) string { - normalizedPath := strings.Replace(path, "/", "\\", -1) - if strings.HasPrefix(normalizedPath, "\\") { - normalizedPath = "c:" + normalizedPath - } - return normalizedPath -} - -// ValidateDiskNumber : disk number should be a number in [0, 99] -func ValidateDiskNumber(disk string) error { - diskNum, err := strconv.Atoi(disk) - if err != nil { - return fmt.Errorf("wrong disk number format: %q, err:%v", disk, err) - } - - if diskNum < 0 || diskNum > 99 { - return fmt.Errorf("disk number out of range: %q", disk) - } - - return nil -} - -// Get drive letter according to windows disk number -func getDriveLetterByDiskNumber(diskNum string, exec Exec) (string, error) { - cmd := fmt.Sprintf("(Get-Partition -DiskNumber %s).DriveLetter", diskNum) - output, err := exec.Run("powershell", "/c", cmd) - if err != nil { - return "", fmt.Errorf("azureMount: Get Drive Letter failed: %v, output: %q", err, string(output)) - } - if len(string(output)) < 1 { - return "", fmt.Errorf("azureMount: Get Drive Letter failed, output is empty") - } - return string(output)[:1], nil -} - -// getAllParentLinks walks all symbolic links and return all the parent targets recursively -func getAllParentLinks(path string) ([]string, error) { - const maxIter = 255 - links := []string{} - for { - links = append(links, path) - if len(links) > maxIter { - return links, fmt.Errorf("unexpected length of parent links: %v", links) - } - - fi, err := os.Lstat(path) - if err != nil { - return links, 
fmt.Errorf("Lstat: %v", err) - } - if fi.Mode()&os.ModeSymlink == 0 { - break - } - - path, err = os.Readlink(path) - if err != nil { - return links, fmt.Errorf("Readlink error: %v", err) - } - } - - return links, nil -} - -// GetMountRefs : empty implementation here since there is no place to query all mount points on Windows -func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(normalizeWindowsPath(pathname)); os.IsNotExist(err) { - return []string{}, nil - } else if err != nil { - return nil, err - } - return []string{pathname}, nil -} - -// GetFSGroup always returns 0. We actually don't set FSGroup on -// windows platform, see SetVolumeOwnership implementation. -func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { - return 0, nil -} - -// GetSELinuxSupport returns false -func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { - // Windows does not support SELinux. - return false, nil -} - -// GetMode returns the filesystem mode. -func (mounter *Mounter) GetMode(pathname string) (os.FileMode, error) { - info, err := os.Stat(pathname) - if err != nil { - return 0, err - } - return info.Mode(), nil -} - -// SafeMakeDir makes sure that the created directory does not escape given base directory mis-using symlinks. -func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode) error { - realBase, err := filepath.EvalSymlinks(base) - if err != nil { - return fmt.Errorf("error resolving symlinks in %s: %s", base, err) - } - - realFullPath := filepath.Join(realBase, subdir) - return doSafeMakeDir(realFullPath, realBase, perm) -} - -func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - klog.V(4).Infof("Creating directory %q within base %q", pathname, base) - - if !PathWithinBase(pathname, base) { - return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) - } - - // Quick check if the directory already exists - s, err := os.Stat(pathname) - if err == nil { - // Path exists - if s.IsDir() { - // The directory already exists. It can be outside of the parent, - // but there is no race-proof check. 
- klog.V(4).Infof("Directory %s already exists", pathname) - return nil - } - return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} - } - - // Find all existing directories - existingPath, toCreate, err := findExistingPrefix(base, pathname) - if err != nil { - return fmt.Errorf("error opening directory %s: %s", pathname, err) - } - if len(toCreate) == 0 { - return nil - } - - // Ensure the existing directory is inside allowed base - fullExistingPath, err := filepath.EvalSymlinks(existingPath) - if err != nil { - return fmt.Errorf("error opening existing directory %s: %s", existingPath, err) - } - fullBasePath, err := filepath.EvalSymlinks(base) - if err != nil { - return fmt.Errorf("cannot read link %s: %s", base, err) - } - if !PathWithinBase(fullExistingPath, fullBasePath) { - return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err) - } - - // lock all intermediate directories from fullBasePath to fullExistingPath (top to bottom) - fileHandles, err := lockAndCheckSubPathWithoutSymlink(fullBasePath, fullExistingPath) - defer unlockPath(fileHandles) - if err != nil { - return err - } - - klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) - currentPath := fullExistingPath - // create the directories one by one, making sure nobody can change - // created directory into symlink by lock that directory immediately - for _, dir := range toCreate { - currentPath = filepath.Join(currentPath, dir) - klog.V(4).Infof("Creating %s", dir) - if err := os.Mkdir(currentPath, perm); err != nil { - return fmt.Errorf("cannot create directory %s: %s", currentPath, err) - } - handle, err := lockPath(currentPath) - if err != nil { - return fmt.Errorf("cannot lock path %s: %s", currentPath, err) - } - defer syscall.CloseHandle(syscall.Handle(handle)) - // make sure newly created directory does not contain symlink after lock - stat, err := os.Lstat(currentPath) - if err != nil { - return fmt.Errorf("Lstat(%q) error: %v", currentPath, err) - } - if stat.Mode()&os.ModeSymlink != 0 { - return fmt.Errorf("subpath %q is an unexpected symlink after Mkdir", currentPath) - } - } - - return nil -} - -// findExistingPrefix finds prefix of pathname that exists. In addition, it -// returns list of remaining directories that don't exist yet. -func findExistingPrefix(base, pathname string) (string, []string, error) { - rel, err := filepath.Rel(base, pathname) - if err != nil { - return base, nil, err - } - - if startsWithBackstep(rel) { - return base, nil, fmt.Errorf("pathname(%s) is not within base(%s)", pathname, base) - } - - if rel == "." 
{ - // base and pathname are equal - return pathname, []string{}, nil - } - - dirs := strings.Split(rel, string(filepath.Separator)) - - parent := base - currentPath := base - for i, dir := range dirs { - parent = currentPath - currentPath = filepath.Join(parent, dir) - if _, err := os.Lstat(currentPath); err != nil { - if os.IsNotExist(err) { - return parent, dirs[i:], nil - } - return base, nil, err - } - } - - return pathname, []string{}, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/BUILD index 794192566d5..b1297ec4df0 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/BUILD @@ -29,7 +29,6 @@ go_library( "//staging/src/k8s.io/client-go/pkg/version:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/vmware/govmomi/find:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", "//vendor/github.com/vmware/govmomi/pbm:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers/vdm.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers/vdm.go index 654d018a112..08f650c70be 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers/vdm.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers/vdm.go @@ -39,11 +39,17 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl if diskManager.volumeOptions.SCSIControllerType == "" { diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType } - // Create virtual disk - diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat] - // Create a virtual disk manager - vdm := object.NewVirtualDiskManager(datastore.Client()) + + // Check for existing VMDK before attempting create. Because a name collision + // is unlikely, "VMDK already exists" is likely from a previous attempt to + // create this volume. + if dsPath := vclib.GetPathFromVMDiskPath(diskManager.diskPath); datastore.Exists(ctx, dsPath) { + klog.V(2).Infof("Create: VirtualDisk already exists, returning success. 
Name=%q", diskManager.diskPath) + return diskManager.diskPath, nil + } + // Create specification for new virtual disk + diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat] vmDiskSpec := &types.FileBackedVirtualDiskSpec{ VirtualDiskSpec: types.VirtualDiskSpec{ AdapterType: diskManager.volumeOptions.SCSIControllerType, @@ -51,6 +57,8 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl }, CapacityKb: int64(diskManager.volumeOptions.CapacityKB), } + + vdm := object.NewVirtualDiskManager(datastore.Client()) requestTime := time.Now() // Create virtual disk task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/vsphere_metrics.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/vsphere_metrics.go index 5baafc4ff0e..4b25a811078 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/vsphere_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/vsphere_metrics.go @@ -19,8 +19,6 @@ package vclib import ( "time" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -103,17 +101,17 @@ func RecordvSphereMetric(actionName string, requestTime time.Time, err error) { func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) { if err != nil { - vsphereAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc() + vsphereAPIErrorMetric.With(metrics.Labels{"request": actionName}).Inc() } else { - vsphereAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime)) + vsphereAPIMetric.With(metrics.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime)) } } func recordvSphereOperationMetric(actionName string, requestTime time.Time, err error) { if err != nil { - vsphereOperationErrorMetric.With(prometheus.Labels{"operation": actionName}).Inc() + vsphereOperationErrorMetric.With(metrics.Labels{"operation": actionName}).Inc() } else { - vsphereOperationMetric.With(prometheus.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime)) + vsphereOperationMetric.With(metrics.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/OWNERS b/cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/OWNERS rename to cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/doc.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/doc.go similarity index 91% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/doc.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/doc.go index 15179e53f2d..c81b426ce8c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package mount defines an interface to mounting filesystems. 
-package mount // import "k8s.io/kubernetes/pkg/util/mount" +package mount // import "k8s.io/utils/mount" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/fake_mounter.go similarity index 85% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/fake_mounter.go index 66b877064d0..d3a82f415c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/fake_mounter.go @@ -27,14 +27,18 @@ import ( // FakeMounter implements mount.Interface for tests. type FakeMounter struct { MountPoints []MountPoint - Log []FakeAction + log []FakeAction // Error to return for a path when calling IsLikelyNotMountPoint MountCheckErrors map[string]error // Some tests run things in parallel, make sure the mounter does not produce // any golang's DATA RACE warnings. - mutex sync.Mutex + mutex sync.Mutex + UnmountFunc UnmountFunc } +// UnmountFunc is a function callback to be executed during the Unmount() call. +type UnmountFunc func(path string) error + var _ Interface = &FakeMounter{} const ( @@ -52,12 +56,28 @@ type FakeAction struct { FSType string // applies only to "mount" actions } +// NewFakeMounter returns a FakeMounter struct that implements Interface and is +// suitable for testing purposes. +func NewFakeMounter(mps []MountPoint) *FakeMounter { + return &FakeMounter{ + MountPoints: mps, + } +} + // ResetLog clears all the log entries in FakeMounter func (f *FakeMounter) ResetLog() { f.mutex.Lock() defer f.mutex.Unlock() - f.Log = []FakeAction{} + f.log = []FakeAction{} +} + +// GetLog returns the slice of FakeActions taken by the mounter +func (f *FakeMounter) GetLog() []FakeAction { + f.mutex.Lock() + defer f.mutex.Unlock() + + return f.log } // Mount records the mount event and updates the in-memory mount points for FakeMounter @@ -99,7 +119,7 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options } f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts}) klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) - f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) + f.log = append(f.log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) return nil } @@ -117,6 +137,12 @@ func (f *FakeMounter) Unmount(target string) error { newMountpoints := []MountPoint{} for _, mp := range f.MountPoints { if mp.Path == absTarget { + if f.UnmountFunc != nil { + err := f.UnmountFunc(absTarget) + if err != nil { + return err + } + } klog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) // Don't copy it to newMountpoints continue @@ -124,7 +150,7 @@ func (f *FakeMounter) Unmount(target string) error { newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type}) } f.MountPoints = newMountpoints - f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) + f.log = append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) delete(f.MountCheckErrors, target) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go similarity index 91% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go rename to 
cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go index 7324d691144..821cc875345 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go @@ -23,6 +23,8 @@ import ( "os" "path/filepath" "strings" + + utilexec "k8s.io/utils/exec" ) const ( @@ -45,21 +47,21 @@ type Interface interface { // is not a mountpoint. // It should return ErrNotExist when the directory does not exist. // IsLikelyNotMountPoint does NOT properly detect all mountpoint types - // most notably linux bind mounts and symbolic link. + // most notably linux bind mounts and symbolic link. For callers that do not + // care about such situations, this is a faster alternative to calling List() + // and scanning that output. IsLikelyNotMountPoint(file string) (bool, error) - // GetMountRefs finds all mount references to the path, returns a - // list of paths. Path could be a mountpoint path, device or a normal - // directory (for bind mount). + // GetMountRefs finds all mount references to pathname, returning a slice of + // paths. Pathname can be a mountpoint path or a normal directory + // (for bind mount). On Linux, pathname is excluded from the slice. + // For example, if /dev/sdc was mounted at /path/a and /path/b, + // GetMountRefs("/path/a") would return ["/path/b"] + // GetMountRefs("/path/b") would return ["/path/a"] + // On Windows there is no way to query all mount points; as long as pathname is + // a valid mount, it will be returned. GetMountRefs(pathname string) ([]string, error) } -// Exec is an interface for executing commands on systems. -type Exec interface { - // Run executes a command and returns its stdout + stderr combined in one - // stream. - Run(cmd string, args ...string) ([]byte, error) -} - // Compile-time check to ensure all Mounter implementations satisfy // the mount interface. var _ Interface = &Mounter{} @@ -79,7 +81,7 @@ type MountPoint struct { // mounts it otherwise the device is formatted first then mounted. type SafeFormatAndMount struct { Interface - Exec + Exec utilexec.Interface } // FormatAndMount formats the given disk, if needed, and mounts it. 
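For reference, a minimal sketch of how a caller wires the relocated mount package together after this hunk: SafeFormatAndMount now embeds utilexec.Interface from k8s.io/utils/exec in place of the removed package-local Exec interface, and commands go through Command(...).CombinedOutput() rather than Run(...). The mount.New("") and utilexec.New() constructors and the device/target paths are assumptions used only for illustration; NewFakeMounter, GetLog and UnmountFunc come from the fake_mounter.go hunk above.

package main

import (
    "fmt"

    utilexec "k8s.io/utils/exec"
    "k8s.io/utils/mount"
)

func main() {
    // Production wiring: platform mounter plus a real exec implementation.
    m := &mount.SafeFormatAndMount{
        Interface: mount.New(""),  // assumed default constructor for the platform Mounter
        Exec:      utilexec.New(), // replaces the old package-local Exec interface
    }
    // FormatAndMount formats the device if needed, then mounts it (paths are illustrative).
    if err := m.FormatAndMount("/dev/sdc", "/mnt/data", "ext4", nil); err != nil {
        fmt.Println("format and mount failed:", err)
    }

    // Test wiring: the fake mounter now exposes a constructor, a log accessor and an
    // optional unmount hook instead of the exported Log field.
    fake := mount.NewFakeMounter(nil)
    fake.UnmountFunc = func(path string) error { return nil }
    _ = fake.Mount("/dev/sdc", "/mnt/data", "ext4", nil)
    _ = fake.Unmount("/mnt/data")
    fmt.Printf("recorded %d fake actions\n", len(fake.GetLog()))
}

Delegating execution to utilexec.Interface means tests can substitute a fake exec implementation alongside FakeMounter, instead of stubbing the old package-local Exec.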
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_common.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_common.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_common.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_common.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_unix.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_unix.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_unix.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_unix.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_windows.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_windows.go similarity index 93% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_windows.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_windows.go index 5e6b1dd9adc..516f970d282 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_helper_windows.go @@ -70,6 +70,9 @@ func IsCorruptedMnt(err error) bool { return false } +// NormalizeWindowsPath makes sure the given path is a valid path on Windows +// systems by making sure all instances of `/` are replaced with `\\`, and the +// path begins with `c:` func NormalizeWindowsPath(path string) string { normalizedPath := strings.Replace(path, "/", "\\", -1) if strings.HasPrefix(normalizedPath, "\\") { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go similarity index 97% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go index dfd52bfe105..2a910024297 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go @@ -238,7 +238,7 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { } // GetMountRefs finds all mount references to pathname, returns a -// list of paths. Path could be a mountpoint path, device or a normal +// list of paths. Path could be a mountpoint or a normal // directory (for bind mount). func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { pathExists, pathErr := PathExists(pathname) @@ -273,7 +273,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, // Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw. klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) args := []string{"-a", source} - out, err := mounter.Exec.Run("fsck", args...) + out, err := mounter.Exec.Command("fsck", args...).CombinedOutput() if err != nil { ee, isExitError := err.(utilexec.ExitError) switch { @@ -320,7 +320,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } } klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) - _, err := mounter.Exec.Run("mkfs."+fstype, args...) + _, err := mounter.Exec.Command("mkfs."+fstype, args...).CombinedOutput() if err == nil { // the disk has been formatted successfully try to mount it again.
klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) @@ -344,7 +344,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk} klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) - dataOut, err := mounter.Exec.Run("blkid", args...) + dataOut, err := mounter.Exec.Command("blkid", args...).CombinedOutput() output := string(dataOut) klog.V(4).Infof("Output: %q, err: %v", output, err) @@ -441,7 +441,8 @@ func parseProcMounts(content []byte) ([]MountPoint, error) { // SearchMountPoints finds all mount references to the source, returns a list of // mountpoints. -// This function assumes source cannot be device. +// The source can be a mount point or a normal directory (bind mount). We +// didn't support device because there is no use case by now. // Some filesystems may share a source name, e.g. tmpfs. And for bind mounting, // it's possible to mount a non-root path of a filesystem, so we need to use // root path and major:minor to represent mount source uniquely. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_unsupported.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_unsupported.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_windows.go similarity index 95% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go rename to cluster-autoscaler/vendor/k8s.io/utils/mount/mount_windows.go index f77931d1446..9371ff43551 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_windows.go @@ -26,6 +26,7 @@ import ( "strings" "k8s.io/klog" + utilexec "k8s.io/utils/exec" "k8s.io/utils/keymutex" utilpath "k8s.io/utils/path" ) @@ -219,7 +220,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, // format disk if it is unformatted(raw) cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru"+ " | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", source, fstype) - if output, err := mounter.Exec.Run("powershell", "/c", cmd); err != nil { + if output, err := mounter.Exec.Command("powershell", "/c", cmd).CombinedOutput(); err != nil { return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output)) } klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) @@ -231,7 +232,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, driverPath := driveLetter + ":" target = NormalizeWindowsPath(target) klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) - if output, err := mounter.Exec.Run("cmd", "/c", "mklink", "/D", target, driverPath); err != nil { + if output, err := mounter.Exec.Command("cmd", "/c", "mklink", "/D", target, driverPath).CombinedOutput(); err != nil { klog.Errorf("mklink failed: %v, output: %q", err, string(output)) return 
err } @@ -239,9 +240,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } // Get drive letter according to windows disk number -func getDriveLetterByDiskNumber(diskNum string, exec Exec) (string, error) { +func getDriveLetterByDiskNumber(diskNum string, exec utilexec.Interface) (string, error) { cmd := fmt.Sprintf("(Get-Partition -DiskNumber %s).DriveLetter", diskNum) - output, err := exec.Run("powershell", "/c", cmd) + output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() if err != nil { return "", fmt.Errorf("azureMount: Get Drive Letter failed: %v, output: %q", err, string(output)) } diff --git a/cluster-autoscaler/vendor/k8s.io/utils/net/net.go b/cluster-autoscaler/vendor/k8s.io/utils/net/net.go index 13449ac1c50..2690aa0c20f 100644 --- a/cluster-autoscaler/vendor/k8s.io/utils/net/net.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/net/net.go @@ -17,8 +17,12 @@ limitations under the License. package net import ( + "errors" "fmt" + "math" + "math/big" "net" + "strconv" ) // ParseCIDRs parses a list of cidrs and return error if any is invalid. @@ -132,3 +136,54 @@ func IsIPv6CIDR(cidr *net.IPNet) bool { ip := cidr.IP return IsIPv6(ip) } + +// ParsePort parses a string representing an IP port. If the string is not a +// valid port number, this returns an error. +func ParsePort(port string, allowZero bool) (int, error) { + portInt, err := strconv.ParseUint(port, 10, 16) + if err != nil { + return 0, err + } + if portInt == 0 && !allowZero { + return 0, errors.New("0 is not a valid port number") + } + return int(portInt), nil +} + +// BigForIP creates a big.Int based on the provided net.IP +func BigForIP(ip net.IP) *big.Int { + b := ip.To4() + if b == nil { + b = ip.To16() + } + return big.NewInt(0).SetBytes(b) +} + +// AddIPOffset adds the provided integer offset to a base big.Int representing a +// net.IP +func AddIPOffset(base *big.Int, offset int) net.IP { + return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes()) +} + +// RangeSize returns the size of a range in valid addresses. +// returns the size of the subnet (or math.MaxInt64 if the range size would overflow int64) +func RangeSize(subnet *net.IPNet) int64 { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // this checks that we are not overflowing an int64 + if bits-ones >= 63 { + return math.MaxInt64 + } + return int64(1) << uint(bits-ones) +} + +// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. +func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { + ip := AddIPOffset(BigForIP(subnet.IP), index) + if !subnet.Contains(ip) { + return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. 
subnet: %q", index, subnet) + } + return ip, nil +} diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 1e314a36594..f6467a33881 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -1,6 +1,6 @@ # cloud.google.com/go v0.38.0 => cloud.google.com/go v0.38.0 cloud.google.com/go/compute/metadata -# github.com/Azure/azure-sdk-for-go v32.5.0+incompatible => github.com/Azure/azure-sdk-for-go v32.5.0+incompatible +# github.com/Azure/azure-sdk-for-go v35.0.0+incompatible => github.com/Azure/azure-sdk-for-go v35.0.0+incompatible github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network @@ -39,7 +39,7 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock github.com/JeffAshton/win_pdh # github.com/Microsoft/go-winio v0.4.11 => github.com/Microsoft/go-winio v0.4.11 github.com/Microsoft/go-winio -# github.com/Microsoft/hcsshim v0.8.6 => github.com/Microsoft/hcsshim v0.8.6 +# github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d => github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/hcn github.com/Microsoft/hcsshim/internal/guid @@ -53,7 +53,6 @@ github.com/Microsoft/hcsshim/internal/cni github.com/Microsoft/hcsshim/internal/interop github.com/Microsoft/hcsshim/internal/regstate github.com/Microsoft/hcsshim/internal/runhcs -github.com/Microsoft/hcsshim/internal/guestrequest github.com/Microsoft/hcsshim/internal/logfields github.com/Microsoft/hcsshim/internal/timeout github.com/Microsoft/hcsshim/internal/schema2 @@ -110,7 +109,7 @@ github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/private/protocol/jsonrpc github.com/aws/aws-sdk-go/private/protocol/json/jsonutil github.com/aws/aws-sdk-go/service/ecr -# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 => github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 +# github.com/beorn7/perks v1.0.0 => github.com/beorn7/perks v1.0.0 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible => github.com/blang/semver v3.5.0+incompatible github.com/blang/semver @@ -118,7 +117,7 @@ github.com/blang/semver github.com/checkpoint-restore/go-criu/rpc # github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 => github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 github.com/clusterhq/flocker-go -# github.com/container-storage-interface/spec v1.1.0 => github.com/container-storage-interface/spec v1.1.0 +# github.com/container-storage-interface/spec v1.2.0 => github.com/container-storage-interface/spec v1.2.0 github.com/container-storage-interface/spec/lib/go/csi # github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa => github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa github.com/containerd/console @@ -139,31 +138,11 @@ github.com/containernetworking/cni/pkg/types/020 github.com/containernetworking/cni/pkg/invoke github.com/containernetworking/cni/pkg/version github.com/containernetworking/cni/pkg/types/current -# github.com/coreos/etcd v3.3.15+incompatible => github.com/coreos/etcd v3.3.15+incompatible -github.com/coreos/etcd/clientv3 -github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes -github.com/coreos/etcd/mvcc/mvccpb -github.com/coreos/etcd/pkg/transport -github.com/coreos/etcd/auth/authpb 
-github.com/coreos/etcd/clientv3/balancer -github.com/coreos/etcd/clientv3/balancer/picker -github.com/coreos/etcd/clientv3/balancer/resolver/endpoint -github.com/coreos/etcd/clientv3/credentials -github.com/coreos/etcd/etcdserver/etcdserverpb -github.com/coreos/etcd/pkg/logutil -github.com/coreos/etcd/pkg/types -github.com/coreos/etcd/pkg/tlsutil -github.com/coreos/etcd/clientv3/balancer/connectivity -github.com/coreos/etcd/pkg/systemd -github.com/coreos/etcd/raft -github.com/coreos/etcd/raft/raftpb -# github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 +# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e github.com/coreos/go-systemd/daemon github.com/coreos/go-systemd/dbus -github.com/coreos/go-systemd/util github.com/coreos/go-systemd/journal # github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea -github.com/coreos/pkg/dlopen github.com/coreos/pkg/capnslog # github.com/cyphar/filepath-securejoin v0.2.2 => github.com/cyphar/filepath-securejoin v0.2.2 github.com/cyphar/filepath-securejoin @@ -207,9 +186,9 @@ github.com/docker/docker/pkg/term/windows github.com/docker/go-connections/nat github.com/docker/go-connections/tlsconfig github.com/docker/go-connections/sockets -# github.com/docker/go-units v0.3.3 => github.com/docker/go-units v0.3.3 +# github.com/docker/go-units v0.4.0 => github.com/docker/go-units v0.4.0 github.com/docker/go-units -# github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789 => github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789 +# github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 github.com/docker/libnetwork/ipvs # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream @@ -223,20 +202,20 @@ github.com/euank/go-kmsg-parser/kmsgparser github.com/evanphx/json-patch # github.com/fsnotify/fsnotify v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 github.com/fsnotify/fsnotify -# github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 => github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 +# github.com/ghodss/yaml v1.0.0 => github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml -# github.com/go-openapi/jsonpointer v0.19.2 => github.com/go-openapi/jsonpointer v0.19.2 +# github.com/go-openapi/jsonpointer v0.19.3 => github.com/go-openapi/jsonpointer v0.19.3 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.2 => github.com/go-openapi/jsonreference v0.19.2 +# github.com/go-openapi/jsonreference v0.19.3 => github.com/go-openapi/jsonreference v0.19.3 github.com/go-openapi/jsonreference -# github.com/go-openapi/spec v0.19.2 => github.com/go-openapi/spec v0.19.2 +# github.com/go-openapi/spec v0.19.3 => github.com/go-openapi/spec v0.19.3 github.com/go-openapi/spec -# github.com/go-openapi/swag v0.19.2 => github.com/go-openapi/swag v0.19.2 +# github.com/go-openapi/swag v0.19.5 => github.com/go-openapi/swag v0.19.5 github.com/go-openapi/swag # github.com/go-ozzo/ozzo-validation v3.5.0+incompatible => github.com/go-ozzo/ozzo-validation v3.5.0+incompatible github.com/go-ozzo/ozzo-validation github.com/go-ozzo/ozzo-validation/is -# github.com/godbus/dbus v4.1.0+incompatible => github.com/godbus/dbus v4.1.0+incompatible 
+# github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f => github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f github.com/godbus/dbus # github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d => github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf/proto @@ -248,7 +227,7 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/golang/groupcache/lru # github.com/golang/mock v1.2.0 => github.com/golang/mock v1.2.0 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.3.1 => github.com/golang/protobuf v1.3.1 +# github.com/golang/protobuf v1.3.2 => github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/protoc-gen-go/descriptor @@ -349,7 +328,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus # github.com/hashicorp/golang-lru v0.5.1 => github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/heketi/heketi v9.0.0+incompatible => github.com/heketi/heketi v9.0.0+incompatible +# github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible => github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible github.com/heketi/heketi/client/api/go-client github.com/heketi/heketi/pkg/glusterfs/api github.com/heketi/heketi/pkg/utils @@ -359,7 +338,7 @@ github.com/imdario/mergo github.com/inconshreveable/mousetrap # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af => github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/jmespath/go-jmespath -# github.com/json-iterator/go v1.1.7 => github.com/json-iterator/go v1.1.7 +# github.com/json-iterator/go v1.1.8 => github.com/json-iterator/go v1.1.8 github.com/json-iterator/go # github.com/karrick/godirwalk v1.7.5 => github.com/karrick/godirwalk v1.7.5 github.com/karrick/godirwalk @@ -375,7 +354,7 @@ github.com/libopenstorage/openstorage/pkg/parser github.com/libopenstorage/openstorage/pkg/units # github.com/lithammer/dedent v1.1.0 => github.com/lithammer/dedent v1.1.0 github.com/lithammer/dedent -# github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 => github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 +# github.com/mailru/easyjson v0.7.0 => github.com/mailru/easyjson v0.7.0 github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter github.com/mailru/easyjson/buffer @@ -385,7 +364,7 @@ github.com/mattn/go-shellwords github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/miekg/dns v1.1.4 => github.com/miekg/dns v1.1.4 github.com/miekg/dns -# github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2 => github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2 +# github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 => github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 github.com/mindprince/gonvml # github.com/mistifyio/go-zfs v2.1.1+incompatible => github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/mistifyio/go-zfs @@ -399,9 +378,9 @@ github.com/modern-go/reflect2 github.com/mohae/deepcopy # github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c => github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c github.com/morikuni/aec -# github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058 => github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058 +# github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 => github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 
github.com/mrunalp/fileutils -# github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d => github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 => github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f => github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/mxk/go-flowrate/flowrate @@ -410,7 +389,7 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 => github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go/v1 github.com/opencontainers/image-spec/specs-go -# github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830 => github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830 +# github.com/opencontainers/runc v1.0.0-rc9 => github.com/opencontainers/runc v1.0.0-rc9 github.com/opencontainers/runc/libcontainer/cgroups github.com/opencontainers/runc/libcontainer/cgroups/fs github.com/opencontainers/runc/libcontainer/cgroups/systemd @@ -429,30 +408,26 @@ github.com/opencontainers/runc/libcontainer/seccomp github.com/opencontainers/runc/libcontainer/stacktrace # github.com/opencontainers/runtime-spec v1.0.0 => github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/selinux v1.2.2 => github.com/opencontainers/selinux v1.2.2 +# github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 => github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label -# github.com/pborman/uuid v1.2.0 => github.com/pborman/uuid v1.2.0 -github.com/pborman/uuid -# github.com/pkg/errors v0.8.0 => github.com/pkg/errors v0.8.0 +# github.com/pkg/errors v0.8.1 => github.com/pkg/errors v0.8.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 => github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v0.9.2 => github.com/prometheus/client_golang v0.9.2 +# github.com/prometheus/client_golang v1.0.0 => github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 +github.com/prometheus/client_golang/prometheus/internal +# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 => github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 +# github.com/prometheus/common v0.4.1 => github.com/prometheus/common v0.4.1 github.com/prometheus/common/expfmt github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -# github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a +# github.com/prometheus/procfs v0.0.2 => github.com/prometheus/procfs v0.0.2 github.com/prometheus/procfs -github.com/prometheus/procfs/nfs -github.com/prometheus/procfs/xfs -github.com/prometheus/procfs/internal/util 
+github.com/prometheus/procfs/internal/fs # github.com/quobyte/api v0.1.2 => github.com/quobyte/api v0.1.2 github.com/quobyte/api # github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c => github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c @@ -468,7 +443,7 @@ github.com/spf13/afero github.com/spf13/afero/mem # github.com/spf13/cobra v0.0.5 => github.com/spf13/cobra v0.0.5 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.3 => github.com/spf13/pflag v1.0.3 +# github.com/spf13/pflag v1.0.5 => github.com/spf13/pflag v1.0.5 github.com/spf13/pflag # github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc => github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc github.com/storageos/go-api @@ -477,21 +452,21 @@ github.com/storageos/go-api/netutil github.com/storageos/go-api/serror # github.com/stretchr/objx v0.2.0 => github.com/stretchr/objx v0.2.0 github.com/stretchr/objx -# github.com/stretchr/testify v1.3.0 => github.com/stretchr/testify v1.3.0 +# github.com/stretchr/testify v1.4.0 => github.com/stretchr/testify v1.4.0 github.com/stretchr/testify/mock github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4 => github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4 +# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 => github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/syndtr/gocapability/capability # github.com/thecodeteam/goscaleio v0.1.0 => github.com/thecodeteam/goscaleio v0.1.0 github.com/thecodeteam/goscaleio github.com/thecodeteam/goscaleio/types/v1 -# github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e => github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e +# github.com/vishvananda/netlink v1.0.0 => github.com/vishvananda/netlink v1.0.0 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 => github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 github.com/vishvananda/netns -# github.com/vmware/govmomi v0.20.1 => github.com/vmware/govmomi v0.20.1 +# github.com/vmware/govmomi v0.20.3 => github.com/vmware/govmomi v0.20.3 github.com/vmware/govmomi/find github.com/vmware/govmomi/object github.com/vmware/govmomi/property @@ -518,6 +493,27 @@ github.com/vmware/govmomi/lookup github.com/vmware/govmomi/lookup/types github.com/vmware/govmomi/sts/internal github.com/vmware/govmomi/lookup/methods +# go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 => go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 +go.etcd.io/etcd/clientv3 +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/clientv3/balancer +go.etcd.io/etcd/clientv3/balancer/picker +go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/credentials +go.etcd.io/etcd/etcdserver/etcdserverpb +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/clientv3/balancer/connectivity +go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/raft +go.etcd.io/etcd/raft/confchange +go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/raft/tracker # go.opencensus.io v0.21.0 => go.opencensus.io v0.21.0 go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 @@ -535,25 +531,25 @@ go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate go.opencensus.io/resource 
go.opencensus.io -# go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 => go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 +# go.uber.org/atomic v1.3.2 => go.uber.org/atomic v1.3.2 go.uber.org/atomic -# go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df => go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df +# go.uber.org/multierr v1.1.0 => go.uber.org/multierr v1.1.0 go.uber.org/multierr -# go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 => go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 +# go.uber.org/zap v1.10.0 => go.uber.org/zap v1.10.0 go.uber.org/zap go.uber.org/zap/zapcore go.uber.org/zap/internal/bufferpool go.uber.org/zap/buffer go.uber.org/zap/internal/color go.uber.org/zap/internal/exit -# golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 +# golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 => golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 golang.org/x/crypto/pkcs12 golang.org/x/crypto/ssh/terminal golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/pbkdf2 -# golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc => golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc +# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 => golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 golang.org/x/net/http2 golang.org/x/net/http/httpguts golang.org/x/net/http2/hpack @@ -572,24 +568,24 @@ golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/proxy golang.org/x/net/internal/socks -# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a +# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 => golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 +# golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc -# golang.org/x/text v0.3.2 => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db +# golang.org/x/text v0.3.2 => golang.org/x/text v0.3.2 golang.org/x/text/secure/bidirule golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/transform golang.org/x/text/width -# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d +# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate # google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 => google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 google.golang.org/api/compute/v1 @@ -618,8 +614,7 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api # google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 => google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/grpc v1.23.0 => google.golang.org/grpc v1.23.0 +# google.golang.org/grpc v1.23.1 => google.golang.org/grpc v1.23.1 google.golang.org/grpc google.golang.org/grpc/naming google.golang.org/grpc/balancer 
@@ -658,7 +653,7 @@ gopkg.in/gcfg.v1 gopkg.in/gcfg.v1/scanner gopkg.in/gcfg.v1/token gopkg.in/gcfg.v1/types -# gopkg.in/inf.v0 v0.9.0 => gopkg.in/inf.v0 v0.9.0 +# gopkg.in/inf.v0 v0.9.1 => gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/square/go-jose.v2 v2.2.2 => gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/square/go-jose.v2 @@ -667,9 +662,9 @@ gopkg.in/square/go-jose.v2/cipher gopkg.in/square/go-jose.v2/json # gopkg.in/warnings.v0 v0.1.1 => gopkg.in/warnings.v0 v0.1.1 gopkg.in/warnings.v0 -# gopkg.in/yaml.v2 v2.2.2 => gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/yaml.v2 v2.2.4 => gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/api +# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/api k8s.io/api/core/v1 k8s.io/api/apps/v1 k8s.io/api/policy/v1beta1 @@ -688,8 +683,10 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1beta1 k8s.io/api/discovery/v1alpha1 +k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 +k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1alpha1 @@ -711,9 +708,9 @@ k8s.io/api/authorization/v1beta1 k8s.io/api/admission/v1beta1 k8s.io/api/admission/v1 k8s.io/api/imagepolicy/v1alpha1 -# k8s.io/apiextensions-apiserver v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apiextensions-apiserver +# k8s.io/apiextensions-apiserver v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiextensions-apiserver k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apimachinery +# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apimachinery k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/util/sets @@ -721,10 +718,10 @@ k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/net k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/labels -k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/uuid +k8s.io/apimachinery/pkg/api/equality +k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/runtime/schema @@ -746,6 +743,7 @@ k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/runtime/serializer k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -753,10 +751,8 @@ k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/apis/meta/internalversion -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/third_party/forked/golang/json -k8s.io/apimachinery/pkg/runtime/serializer/yaml k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/yaml @@ -769,9 +765,10 @@ k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path +k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme 
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation -# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/apiserver +# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiserver k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/admission @@ -783,12 +780,13 @@ k8s.io/apiserver/pkg/authentication/authenticatorfactory k8s.io/apiserver/pkg/authorization/authorizer k8s.io/apiserver/pkg/authorization/authorizerfactory k8s.io/apiserver/pkg/server +k8s.io/apiserver/pkg/server/dynamiccertificates k8s.io/apiserver/pkg/admission/plugin/webhook/mutating k8s.io/apiserver/pkg/apis/audit k8s.io/apiserver/pkg/apis/audit/v1 k8s.io/apiserver/pkg/audit k8s.io/apiserver/pkg/apis/apiserver -k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 +k8s.io/apiserver/pkg/apis/apiserver/v1 k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/server/httplog k8s.io/apiserver/pkg/endpoints/handlers/responsewriters @@ -836,6 +834,7 @@ k8s.io/apiserver/pkg/apis/audit/v1beta1 k8s.io/apiserver/pkg/endpoints/handlers/negotiation k8s.io/apiserver/pkg/storage k8s.io/apiserver/pkg/admission/initializer +k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 k8s.io/apiserver/pkg/apis/audit/validation k8s.io/apiserver/pkg/endpoints/handlers k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager @@ -851,8 +850,9 @@ k8s.io/apiserver/pkg/admission/plugin/webhook/rules k8s.io/apiserver/pkg/util/dryrun k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1 k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 -# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/client-go +# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/client-go k8s.io/client-go/kubernetes k8s.io/client-go/rest k8s.io/client-go/tools/clientcmd @@ -892,8 +892,10 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/discovery/v1alpha1 +k8s.io/client-go/kubernetes/typed/discovery/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/node/v1alpha1 @@ -930,6 +932,7 @@ k8s.io/client-go/informers/core k8s.io/client-go/informers/discovery k8s.io/client-go/informers/events k8s.io/client-go/informers/extensions +k8s.io/client-go/informers/flowcontrol k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/node @@ -941,14 +944,9 @@ k8s.io/client-go/informers/storage k8s.io/client-go/util/keyutil k8s.io/client-go/tools/record/util k8s.io/client-go/testing -k8s.io/client-go/informers/apps/v1 -k8s.io/client-go/informers/policy/v1beta1 -k8s.io/client-go/informers/storage/v1 -k8s.io/client-go/informers/storage/v1beta1 -k8s.io/client-go/tools/events k8s.io/client-go/listers/storage/v1 +k8s.io/client-go/tools/events k8s.io/client-go/util/workqueue -k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/discovery/fake 
k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake @@ -971,8 +969,10 @@ k8s.io/client-go/kubernetes/typed/coordination/v1/fake k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1/fake k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake @@ -997,6 +997,7 @@ k8s.io/client-go/util/connrotation k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/informers/admissionregistration/v1 k8s.io/client-go/informers/admissionregistration/v1beta1 +k8s.io/client-go/informers/apps/v1 k8s.io/client-go/informers/apps/v1beta1 k8s.io/client-go/informers/apps/v1beta2 k8s.io/client-go/informers/auditregistration/v1alpha1 @@ -1010,12 +1011,15 @@ k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination/v1 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/discovery/v1alpha1 +k8s.io/client-go/informers/discovery/v1beta1 k8s.io/client-go/informers/events/v1beta1 k8s.io/client-go/informers/extensions/v1beta1 +k8s.io/client-go/informers/flowcontrol/v1alpha1 k8s.io/client-go/informers/networking/v1 k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/informers/node/v1alpha1 k8s.io/client-go/informers/node/v1beta1 +k8s.io/client-go/informers/policy/v1beta1 k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 @@ -1023,8 +1027,11 @@ k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 k8s.io/client-go/informers/scheduling/v1beta1 k8s.io/client-go/informers/settings/v1alpha1 +k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 +k8s.io/client-go/informers/storage/v1beta1 k8s.io/client-go/util/certificate +k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/tools/remotecommand k8s.io/client-go/scale k8s.io/client-go/listers/admissionregistration/v1 @@ -1041,8 +1048,10 @@ k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/discovery/v1alpha1 +k8s.io/client-go/listers/discovery/v1beta1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 +k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/networking/v1 k8s.io/client-go/listers/networking/v1beta1 k8s.io/client-go/listers/node/v1alpha1 @@ -1070,57 +1079,66 @@ k8s.io/client-go/tools/watch k8s.io/client-go/metadata/metadatainformer k8s.io/client-go/metadata k8s.io/client-go/metadata/metadatalister -# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cloud-provider +# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cloud-provider k8s.io/cloud-provider/volume k8s.io/cloud-provider k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/component-base 
+# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/component-base k8s.io/component-base/cli/flag k8s.io/component-base/config -k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry -k8s.io/component-base/featuregate +k8s.io/component-base/metrics +k8s.io/component-base/metrics/prometheus/restclient k8s.io/component-base/version +k8s.io/component-base/featuregate +k8s.io/component-base/version/verflag k8s.io/component-base/logs +k8s.io/component-base/config/validation k8s.io/component-base/config/v1alpha1 -# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/cri-api +# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cri-api k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/csi-translation-lib +# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins k8s.io/csi-translation-lib -# k8s.io/klog v0.4.0 => k8s.io/klog v0.4.0 +# k8s.io/klog v1.0.0 => k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf => k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf +# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a => k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/schemaconv -# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kube-proxy +# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-proxy k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kubectl +# k8s.io/kube-scheduler v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-scheduler +k8s.io/kube-scheduler/config/v1 +k8s.io/kube-scheduler/config/v1alpha1 +# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubectl k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/kubelet +# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubelet k8s.io/kubelet/config/v1beta1 -# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes +k8s.io/kubelet/pkg/apis/pluginregistration/v1 +k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 +# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes k8s.io/kubernetes/pkg/client/leaderelectionconfig k8s.io/kubernetes/pkg/apis/scheduling k8s.io/kubernetes/pkg/kubelet/types k8s.io/kubernetes/pkg/scheduler/nodeinfo k8s.io/kubernetes/pkg/kubelet/apis k8s.io/kubernetes/pkg/kubemark -k8s.io/kubernetes/pkg/scheduler/api k8s.io/kubernetes/pkg/scheduler/util -k8s.io/kubernetes/pkg/client/metrics/prometheus +k8s.io/kubernetes/pkg/scheduler/api k8s.io/kubernetes/pkg/scheduler k8s.io/kubernetes/pkg/scheduler/algorithm/predicates k8s.io/kubernetes/pkg/scheduler/algorithmprovider -k8s.io/kubernetes/pkg/scheduler/factory +k8s.io/kubernetes/pkg/scheduler/apis/config +k8s.io/kubernetes/pkg/scheduler/listers +k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot k8s.io/kubernetes/pkg/api/v1/pod k8s.io/kubernetes/pkg/api/testapi k8s.io/kubernetes/pkg/apis/core @@ -1141,7 +1159,6 @@ 
k8s.io/kubernetes/pkg/proxy k8s.io/kubernetes/pkg/proxy/config k8s.io/kubernetes/pkg/proxy/iptables k8s.io/kubernetes/pkg/util/iptables -k8s.io/kubernetes/pkg/util/mount k8s.io/kubernetes/pkg/util/node k8s.io/kubernetes/pkg/util/oom k8s.io/kubernetes/pkg/util/sysctl @@ -1169,21 +1186,24 @@ k8s.io/kubernetes/pkg/volume/storageos k8s.io/kubernetes/pkg/volume/util/hostutil k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/test/utils -k8s.io/kubernetes/pkg/scheduler/metrics -k8s.io/kubernetes/pkg/scheduler/api/latest -k8s.io/kubernetes/pkg/scheduler/apis/config +k8s.io/kubernetes/pkg/scheduler/apis/extender/v1 +k8s.io/kubernetes/pkg/scheduler/algorithm +k8s.io/kubernetes/pkg/scheduler/algorithm/priorities +k8s.io/kubernetes/pkg/scheduler/apis/config/scheme +k8s.io/kubernetes/pkg/scheduler/apis/config/validation k8s.io/kubernetes/pkg/scheduler/core +k8s.io/kubernetes/pkg/scheduler/framework/plugins +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel +k8s.io/kubernetes/pkg/scheduler/framework/plugins/requestedtocapacityratio +k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1 k8s.io/kubernetes/pkg/scheduler/internal/cache +k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger k8s.io/kubernetes/pkg/scheduler/internal/queue +k8s.io/kubernetes/pkg/scheduler/metrics k8s.io/kubernetes/pkg/scheduler/volumebinder -k8s.io/kubernetes/pkg/apis/core/v1/helper/qos -k8s.io/kubernetes/pkg/scheduler/algorithm k8s.io/kubernetes/pkg/volume/util k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults -k8s.io/kubernetes/pkg/scheduler/algorithm/priorities -k8s.io/kubernetes/pkg/scheduler/api/validation -k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/apis/admission k8s.io/kubernetes/pkg/apis/admission/install @@ -1232,6 +1252,7 @@ k8s.io/kubernetes/pkg/master/ports k8s.io/kubernetes/pkg/proxy/apis k8s.io/kubernetes/pkg/proxy/apis/config k8s.io/kubernetes/pkg/proxy/apis/config/scheme +k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1 k8s.io/kubernetes/pkg/proxy/apis/config/validation k8s.io/kubernetes/pkg/proxy/healthcheck k8s.io/kubernetes/pkg/proxy/ipvs @@ -1241,14 +1262,11 @@ k8s.io/kubernetes/pkg/proxy/util k8s.io/kubernetes/pkg/proxy/winkernel k8s.io/kubernetes/pkg/proxy/winuserspace k8s.io/kubernetes/pkg/util/configz -k8s.io/kubernetes/pkg/util/dbus k8s.io/kubernetes/pkg/util/filesystem k8s.io/kubernetes/pkg/util/flag k8s.io/kubernetes/pkg/util/ipset k8s.io/kubernetes/pkg/util/ipvs k8s.io/kubernetes/pkg/util/netsh -k8s.io/kubernetes/pkg/version -k8s.io/kubernetes/pkg/version/verflag k8s.io/kubernetes/pkg/windows/service k8s.io/kubernetes/pkg/capabilities k8s.io/kubernetes/pkg/cloudprovider/providers @@ -1260,6 +1278,7 @@ k8s.io/kubernetes/pkg/kubelet/apis/config/scheme k8s.io/kubernetes/pkg/kubelet/apis/config/validation k8s.io/kubernetes/pkg/kubelet/certificate k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap +k8s.io/kubernetes/pkg/kubelet/cm/cpuset k8s.io/kubernetes/pkg/kubelet/config k8s.io/kubernetes/pkg/kubelet/container k8s.io/kubernetes/pkg/kubelet/dockershim/remote @@ -1276,6 +1295,7 @@ k8s.io/kubernetes/pkg/volume/awsebs k8s.io/kubernetes/pkg/volume/azure_dd k8s.io/kubernetes/pkg/volume/azure_file k8s.io/kubernetes/pkg/volume/cinder +k8s.io/kubernetes/pkg/volume/csimigration k8s.io/kubernetes/pkg/volume/flexvolume k8s.io/kubernetes/pkg/volume/gcepd k8s.io/kubernetes/pkg/volume/vsphere_volume @@ -1283,8 +1303,8 @@ 
k8s.io/kubernetes/pkg/util/taints k8s.io/kubernetes/pkg/api/v1/resource k8s.io/kubernetes/pkg/apis/core/pods k8s.io/kubernetes/pkg/apis/core/v1 +k8s.io/kubernetes/pkg/apis/core/v1/helper/qos k8s.io/kubernetes/pkg/fieldpath -k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1 k8s.io/kubernetes/pkg/kubelet/apis/podresources k8s.io/kubernetes/pkg/kubelet/checkpointmanager k8s.io/kubernetes/pkg/kubelet/cloudresource @@ -1297,7 +1317,6 @@ k8s.io/kubernetes/pkg/kubelet/lifecycle k8s.io/kubernetes/pkg/kubelet/logs k8s.io/kubernetes/pkg/kubelet/metrics k8s.io/kubernetes/pkg/kubelet/metrics/collectors -k8s.io/kubernetes/pkg/kubelet/mountpod k8s.io/kubernetes/pkg/kubelet/network/dns k8s.io/kubernetes/pkg/kubelet/nodelease k8s.io/kubernetes/pkg/kubelet/nodestatus @@ -1326,16 +1345,15 @@ k8s.io/kubernetes/pkg/kubelet/util/manager k8s.io/kubernetes/pkg/kubelet/util/queue k8s.io/kubernetes/pkg/kubelet/util/sliceutils k8s.io/kubernetes/pkg/kubelet/volumemanager +k8s.io/kubernetes/pkg/kubelet/winstats k8s.io/kubernetes/pkg/security/apparmor k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl k8s.io/kubernetes/pkg/util/removeall k8s.io/kubernetes/pkg/util/selinux -k8s.io/kubernetes/pkg/volume/util/exec k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler k8s.io/kubernetes/pkg/volume/validation k8s.io/kubernetes/third_party/forked/golang/expansion -k8s.io/kubernetes/pkg/kubelet/winstats k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1 k8s.io/kubernetes/pkg/kubelet/cm/cpumanager k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology @@ -1361,14 +1379,32 @@ k8s.io/kubernetes/pkg/util/async k8s.io/kubernetes/pkg/util/conntrack k8s.io/kubernetes/pkg/volume/util/fs k8s.io/kubernetes/pkg/volume/util/recyclerclient -k8s.io/kubernetes/pkg/volume/csi/csiv0 k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager k8s.io/kubernetes/pkg/volume/util/fsquota k8s.io/kubernetes/pkg/util/env k8s.io/kubernetes/pkg/controller/deployment/util k8s.io/kubernetes/pkg/util/labels +k8s.io/kubernetes/pkg/scheduler/apis/config/v1 +k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1 +k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration +k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread +k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality +k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods +k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable +k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits +k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread +k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration +k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding +k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions +k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone +k8s.io/kubernetes/pkg/scheduler/internal/heap +k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics k8s.io/kubernetes/pkg/controller/volume/scheduling -k8s.io/kubernetes/pkg/scheduler/api/v1 k8s.io/kubernetes/pkg/util/resizefs k8s.io/kubernetes/pkg/apis/authentication k8s.io/kubernetes/pkg/apis/admission/v1 @@ -1393,6 +1429,7 @@ 
k8s.io/kubernetes/pkg/apis/certificates/v1beta1 k8s.io/kubernetes/pkg/apis/coordination/v1 k8s.io/kubernetes/pkg/apis/coordination/v1beta1 k8s.io/kubernetes/pkg/apis/discovery/v1alpha1 +k8s.io/kubernetes/pkg/apis/discovery/v1beta1 k8s.io/kubernetes/pkg/apis/events/v1beta1 k8s.io/kubernetes/pkg/apis/extensions/v1beta1 k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1 @@ -1411,7 +1448,6 @@ k8s.io/kubernetes/pkg/apis/settings/v1alpha1 k8s.io/kubernetes/pkg/apis/storage/v1 k8s.io/kubernetes/pkg/apis/storage/v1alpha1 k8s.io/kubernetes/pkg/apis/storage/v1beta1 -k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1 k8s.io/kubernetes/pkg/apis/core/validation k8s.io/kubernetes/pkg/util/slice k8s.io/kubernetes/pkg/util/ipconfig @@ -1450,9 +1486,7 @@ k8s.io/kubernetes/pkg/kubelet/volumemanager/populator k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler k8s.io/kubernetes/pkg/volume/util/operationexecutor k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state -k8s.io/kubernetes/pkg/kubelet/cm/cpuset -k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask -k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1 +k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics k8s.io/kubernetes/pkg/util/bandwidth @@ -1471,7 +1505,7 @@ k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp k8s.io/kubernetes/pkg/security/podsecuritypolicy/util k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations k8s.io/kubernetes/pkg/serviceaccount -# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.oU6q/kubernetes/staging/src/k8s.io/legacy-cloud-providers +# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/legacy-cloud-providers k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/gce k8s.io/legacy-cloud-providers/azure @@ -1479,11 +1513,11 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/azure/auth k8s.io/legacy-cloud-providers/vsphere/vclib -k8s.io/legacy-cloud-providers/openstack/util/mount k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/utils v0.0.0-20190801114015-581e00157fb1 => k8s.io/utils v0.0.0-20190801114015-581e00157fb1 +# k8s.io/utils v0.0.0-20191114184206-e782cd3c129f => k8s.io/utils v0.0.0-20191114184206-e782cd3c129f k8s.io/utils/net k8s.io/utils/exec +k8s.io/utils/mount k8s.io/utils/pointer k8s.io/utils/buffer k8s.io/utils/trace @@ -1491,11 +1525,11 @@ k8s.io/utils/integer k8s.io/utils/inotify k8s.io/utils/path k8s.io/utils/io -k8s.io/utils/keymutex k8s.io/utils/strings +k8s.io/utils/keymutex k8s.io/utils/nsenter k8s.io/utils/clock -# sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca => sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca +# sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 => sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 sigs.k8s.io/structured-merge-diff/fieldpath sigs.k8s.io/structured-merge-diff/merge sigs.k8s.io/structured-merge-diff/typed diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go index 9783c3bac53..841558a606f 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go @@ -35,7 +35,7 @@ type 
PathElement struct { // Key selects the list element which has fields matching those given. // The containing object must be an associative list with map typed // elements. - Key *value.Map + Key *value.FieldList // Value selects the list element with the given value. The containing // object must be an associative list with a primitive typed element @@ -62,7 +62,7 @@ func (e PathElement) Less(rhs PathElement) bool { if rhs.Key == nil { return true } - return e.Key.Less(rhs.Key) + return e.Key.Less(*rhs.Key) } else if rhs.Key != nil { return false } @@ -101,13 +101,11 @@ func (e PathElement) String() string { case e.FieldName != nil: return "." + *e.FieldName case e.Key != nil: - strs := make([]string, len(e.Key.Items)) - for i, k := range e.Key.Items { + strs := make([]string, len(*e.Key)) + for i, k := range *e.Key { strs[i] = fmt.Sprintf("%v=%v", k.Name, k.Value) } - // The order must be canonical, since we use the string value - // in a set structure. - sort.Strings(strs) + // Keys are supposed to be sorted. return "[" + strings.Join(strs, ",") + "]" case e.Value != nil: return fmt.Sprintf("[=%v]", e.Value) @@ -123,18 +121,19 @@ func (e PathElement) String() string { // names (type must be string) with values (type must be value.Value). If these // conditions are not met, KeyByFields will panic--it's intended for static // construction and shouldn't have user-produced values passed to it. -func KeyByFields(nameValues ...interface{}) []value.Field { +func KeyByFields(nameValues ...interface{}) *value.FieldList { if len(nameValues)%2 != 0 { panic("must have a value for every name") } - out := []value.Field{} + out := value.FieldList{} for i := 0; i < len(nameValues)-1; i += 2 { out = append(out, value.Field{ Name: nameValues[i].(string), Value: nameValues[i+1].(value.Value), }) } - return out + out.Sort() + return &out } // PathElementSet is a set of path elements. 
@@ -143,6 +142,12 @@ type PathElementSet struct { members sortedPathElements } +func MakePathElementSet(size int) PathElementSet { + return PathElementSet{ + members: make(sortedPathElements, 0, size), + } +} + type sortedPathElements []PathElement // Implement the sort interface; this would permit bulk creation, which would diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go index 830880fb49d..3f9eaf63dab 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go @@ -104,7 +104,7 @@ func GuessBestListPathElement(index int, item value.Value) PathElement { return PathElement{Index: &index} } - var keys []value.Field + var keys value.FieldList for _, name := range AssociativeListCandidateFieldNames { f, ok := item.MapValue.Get(name) if !ok { @@ -117,7 +117,8 @@ func GuessBestListPathElement(index int, item value.Value) PathElement { keys = append(keys, *f) } if len(keys) > 0 { - return PathElement{Key: &value.Map{Items: keys}} + keys.Sort() + return PathElement{Key: &keys} } return PathElement{Index: &index} } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go index ca80e7cca79..bdf6ce90fd1 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go @@ -88,11 +88,11 @@ func MakePath(parts ...interface{}) (Path, error) { fp = append(fp, PathElement{Index: &t}) case string: fp = append(fp, PathElement{FieldName: &t}) - case []value.Field: - if len(t) == 0 { + case *value.FieldList: + if len(*t) == 0 { return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)") } - fp = append(fp, PathElement{Key: &value.Map{Items: t}}) + fp = append(fp, PathElement{Key: t}) case value.Value: // TODO: understand schema and verify that this is a set type // TODO: make a copy of t diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/pathelementmap.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/pathelementmap.go new file mode 100644 index 00000000000..f35089a1ca5 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/pathelementmap.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sort" + + "sigs.k8s.io/structured-merge-diff/value" +) + +// PathElementValueMap is a map from PathElement to value.Value. +// +// TODO(apelisse): We have multiple very similar implementation of this +// for PathElementSet and SetNodeMap, so we could probably share the +// code. 
+type PathElementValueMap struct { + members sortedPathElementValues +} + +func MakePathElementValueMap(size int) PathElementValueMap { + return PathElementValueMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +type pathElementValue struct { + PathElement PathElement + Value value.Value +} + +type sortedPathElementValues []pathElementValue + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (spev sortedPathElementValues) Len() int { return len(spev) } +func (spev sortedPathElementValues) Less(i, j int) bool { + return spev[i].PathElement.Less(spev[j].PathElement) +} +func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } + +// Insert adds the pathelement and associated value in the map. +func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pathElementValue{pe, v}) + return + } + if s.members[loc].PathElement.Equals(pe) { + return + } + s.members = append(s.members, pathElementValue{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pathElementValue{pe, v} +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + return value.Value{}, false + } + if s.members[loc].PathElement.Equals(pe) { + return s.members[loc].Value, true + } + return value.Value{}, false +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go index a0338fa66ef..e491fce1e7d 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go @@ -83,14 +83,18 @@ func DeserializePathElement(s string) (PathElement, error) { case peKeySepBytes[0]: iter := readPool.BorrowIterator(b) defer readPool.ReturnIterator(iter) - v, err := value.ReadJSONIter(iter) - if err != nil { - return PathElement{}, err - } - if v.MapValue == nil { - return PathElement{}, fmt.Errorf("expected key value pairs but got %#v", v) - } - return PathElement{Key: v.MapValue}, nil + fields := value.FieldList{} + iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool { + v, err := value.ReadJSONIter(iter) + if err != nil { + iter.Error = err + return false + } + fields = append(fields, value.Field{Name: key, Value: v}) + return true + }) + fields.Sort() + return PathElement{Key: &fields}, iter.Error case peIndexSepBytes[0]: i, err := strconv.Atoi(s[2:]) if err != nil { @@ -129,8 +133,15 @@ func serializePathElementToWriter(w io.Writer, pe PathElement) error { if _, err := stream.Write(peKeySepBytes); err != nil { return err } - v := value.Value{MapValue: pe.Key} - v.WriteJSONStream(stream) + stream.WriteObjectStart() + for i, field := range *pe.Key { + if i > 0 { + stream.WriteMore() + } + stream.WriteObjectField(field.Name) + field.Value.WriteJSONStream(stream) + } + stream.WriteObjectEnd() case pe.Value != nil: if _, err := stream.Write(peValueSepBytes); err != nil { 
return err diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go index 96c9751ac78..75d599077c9 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go @@ -41,12 +41,12 @@ func (s *Updater) EnableUnionFeature() { s.enableUnions = true } -func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, error) { +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { conflicts := fieldpath.ManagedFields{} removed := fieldpath.ManagedFields{} compare, err := oldObject.Compare(newObject) if err != nil { - return nil, fmt.Errorf("failed to compare objects: %v", err) + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } versions := map[fieldpath.APIVersion]*typed.Comparison{ @@ -66,7 +66,7 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa delete(managers, manager) continue } - return nil, fmt.Errorf("failed to convert old object: %v", err) + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) } versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) if err != nil { @@ -74,11 +74,11 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa delete(managers, manager) continue } - return nil, fmt.Errorf("failed to convert new object: %v", err) + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) } compare, err = versionedOldObject.Compare(versionedNewObject) if err != nil { - return nil, fmt.Errorf("failed to compare objects: %v", err) + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } versions[managerSet.APIVersion()] = compare } @@ -94,7 +94,7 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa } if !force && len(conflicts) != 0 { - return nil, ConflictsFromManagers(conflicts) + return nil, nil, ConflictsFromManagers(conflicts) } for manager, conflictSet := range conflicts { @@ -111,7 +111,7 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa } } - return managers, nil + return managers, compare, nil } // Update is the method you should call once you've merged your final @@ -128,14 +128,10 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp } } managers = shallowCopyManagers(managers) - managers, err = s.update(liveObject, newObject, version, managers, manager, true) + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err } - compare, err := liveObject.Compare(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to compare live and new objects: %v", err) - } if _, ok := managers[manager]; !ok { managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) } @@ -182,7 +178,7 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, err = s.update(liveObject, 
newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go index 4338696aa56..5a2859e455b 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go @@ -16,9 +16,17 @@ limitations under the License. package schema +import "sync" + // Schema is a list of named types. +// +// Schema types are indexed in a map before the first search so this type +// should be considered immutable. type Schema struct { Types []TypeDef `yaml:"types,omitempty"` + + once sync.Once + m map[string]TypeDef } // A TypeSpecifier references a particular type in a schema. @@ -92,6 +100,9 @@ const ( // // Maps may also represent a type which is composed of a number of different fields. // Each field has a name and a type. +// +// Fields are indexed in a map before the first search so this type +// should be considered immutable. type Map struct { // Each struct field appears exactly once in this list. The order in // this list defines the canonical field ordering. @@ -117,6 +128,22 @@ type Map struct { // The default behavior for maps is `separable`; it's permitted to // leave this unset to get the default behavior. ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + once sync.Once + m map[string]StructField +} + +// FindField is a convenience function that returns the referenced StructField, +// if it exists, or (nil, false) if it doesn't. +func (m *Map) FindField(name string) (StructField, bool) { + m.once.Do(func() { + m.m = make(map[string]StructField, len(m.Fields)) + for _, field := range m.Fields { + m.m[field.Name] = field + } + }) + sf, ok := m.m[name] + return sf, ok } // UnionFields are mapping between the fields that are part of the union and @@ -204,13 +231,15 @@ type List struct { // FindNamedType is a convenience function that returns the referenced TypeDef, // if it exists, or (nil, false) if it doesn't. 
-func (s Schema) FindNamedType(name string) (TypeDef, bool) { - for _, t := range s.Types { - if t.Name == name { - return t, true +func (s *Schema) FindNamedType(name string) (TypeDef, bool) { + s.once.Do(func() { + s.m = make(map[string]TypeDef, len(s.Types)) + for _, t := range s.Types { + s.m[t.Name] = t } - } - return TypeDef{}, false + }) + t, ok := s.m[name] + return t, ok } // Resolve is a convenience function which returns the atom referenced, whether diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go index a7c12ccb001..0911ce0b482 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go @@ -97,9 +97,9 @@ func (ef errorFormatter) prefixError(prefix string, err error) ValidationErrors } type atomHandler interface { - doScalar(schema.Scalar) ValidationErrors - doList(schema.List) ValidationErrors - doMap(schema.Map) ValidationErrors + doScalar(*schema.Scalar) ValidationErrors + doList(*schema.List) ValidationErrors + doMap(*schema.Map) ValidationErrors errorf(msg string, args ...interface{}) ValidationErrors } @@ -130,11 +130,11 @@ func deduceAtom(a schema.Atom, v *value.Value) schema.Atom { func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErrors { switch { case a.Map != nil: - return ah.doMap(*a.Map) + return ah.doMap(a.Map) case a.Scalar != nil: - return ah.doScalar(*a.Scalar) + return ah.doScalar(a.Scalar) case a.List != nil: - return ah.doList(*a.List) + return ah.doList(a.List) } name := "inlined" @@ -145,14 +145,14 @@ func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErro return ah.errorf("schema error: invalid atom: %v", name) } -func (ef errorFormatter) validateScalar(t schema.Scalar, v *value.Value, prefix string) (errs ValidationErrors) { +func (ef errorFormatter) validateScalar(t *schema.Scalar, v *value.Value, prefix string) (errs ValidationErrors) { if v == nil { return nil } if v.Null { return nil } - switch t { + switch *t { case schema.Numeric: if v.FloatValue == nil && v.IntValue == nil { // TODO: should the schema separate int and float? @@ -195,7 +195,7 @@ func mapValue(val value.Value) (*value.Map, error) { } } -func keyedAssociativeListItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.Null { // For now, the keys are required which means that null entries @@ -205,7 +205,7 @@ func keyedAssociativeListItemToPathElement(list schema.List, index int, child va if child.MapValue == nil { return pe, errors.New("associative list with keys may not have non-map elements") } - keyMap := &value.Map{} + keyMap := value.FieldList{} for _, fieldName := range list.Keys { var fieldValue value.Value field, ok := child.MapValue.Get(fieldName) @@ -215,13 +215,14 @@ func keyedAssociativeListItemToPathElement(list schema.List, index int, child va // Treat keys as required. 
return pe, fmt.Errorf("associative list with keys has an element that omits key field %q", fieldName) } - keyMap.Set(fieldName, fieldValue) + keyMap = append(keyMap, value.Field{Name: fieldName, Value: fieldValue}) } - pe.Key = keyMap + keyMap.Sort() + pe.Key = &keyMap return pe, nil } -func setItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.MapValue != nil: @@ -241,7 +242,7 @@ func setItemToPathElement(list schema.List, index int, child value.Value) (field } } -func listItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func listItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { if list.ElementRelationship == schema.Associative { if len(list.Keys) > 0 { return keyedAssociativeListItemToPathElement(list, index, child) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go index 27bc8b24e37..bb92f84b499 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go @@ -104,7 +104,7 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t schema.Scalar) (errs ValidationErrors) { +func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { errs = append(errs, w.validateScalar(t, w.lhs, "lhs: ")...) errs = append(errs, w.validateScalar(t, w.rhs, "rhs: ")...) if len(errs) > 0 { @@ -158,7 +158,7 @@ func (w *mergingWalker) derefMap(prefix string, v *value.Value, dest **value.Map return nil } -func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (errs ValidationErrors) { +func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs *value.List) (errs ValidationErrors) { out := &value.List{} // TODO: ordering is totally wrong. @@ -168,8 +168,9 @@ func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (err rhsOrder := []fieldpath.PathElement{} // First, collect all RHS children. - observedRHS := map[string]value.Value{} + var observedRHS fieldpath.PathElementValueMap if rhs != nil { + observedRHS = fieldpath.MakePathElementValueMap(len(rhs.Items)) for i, child := range rhs.Items { pe, err := listItemToPathElement(t, i, child) if err != nil { @@ -179,18 +180,18 @@ func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (err // this element. continue } - keyStr := pe.String() - if _, found := observedRHS[keyStr]; found { - errs = append(errs, w.errorf("rhs: duplicate entries for key %v", keyStr)...) + if _, ok := observedRHS.Get(pe); ok { + errs = append(errs, w.errorf("rhs: duplicate entries for key %v", pe.String())...) } - observedRHS[keyStr] = child + observedRHS.Insert(pe, child) rhsOrder = append(rhsOrder, pe) } } // Then merge with LHS children. - observedLHS := map[string]struct{}{} + var observedLHS fieldpath.PathElementSet if lhs != nil { + observedLHS = fieldpath.MakePathElementSet(len(lhs.Items)) for i, child := range lhs.Items { pe, err := listItemToPathElement(t, i, child) if err != nil { @@ -200,15 +201,14 @@ func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (err // this element. 
continue } - keyStr := pe.String() - if _, found := observedLHS[keyStr]; found { - errs = append(errs, w.errorf("lhs: duplicate entries for key %v", keyStr)...) + if observedLHS.Has(pe) { + errs = append(errs, w.errorf("lhs: duplicate entries for key %v", pe.String())...) continue } - observedLHS[keyStr] = struct{}{} + observedLHS.Insert(pe) w2 := w.prepareDescent(pe, t.ElementType) w2.lhs = &child - if rchild, ok := observedRHS[keyStr]; ok { + if rchild, ok := observedRHS.Get(pe); ok { w2.rhs = &rchild } if newErrs := w2.merge(); len(newErrs) > 0 { @@ -217,22 +217,22 @@ func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (err out.Items = append(out.Items, *w2.out) } w.finishDescent(w2) - // Keep track of children that have been handled - delete(observedRHS, keyStr) } } - for _, rhsToCheck := range rhsOrder { - if unmergedChild, ok := observedRHS[rhsToCheck.String()]; ok { - w2 := w.prepareDescent(rhsToCheck, t.ElementType) - w2.rhs = &unmergedChild - if newErrs := w2.merge(); len(newErrs) > 0 { - errs = append(errs, newErrs...) - } else if w2.out != nil { - out.Items = append(out.Items, *w2.out) - } - w.finishDescent(w2) + for _, pe := range rhsOrder { + if observedLHS.Has(pe) { + continue + } + value, _ := observedRHS.Get(pe) + w2 := w.prepareDescent(pe, t.ElementType) + w2.rhs = &value + if newErrs := w2.merge(); len(newErrs) > 0 { + errs = append(errs, newErrs...) + } else if w2.out != nil { + out.Items = append(out.Items, *w2.out) } + w.finishDescent(w2) } if len(out.Items) > 0 { @@ -255,7 +255,7 @@ func (w *mergingWalker) derefList(prefix string, v *value.Value, dest **value.Li return nil } -func (w *mergingWalker) doList(t schema.List) (errs ValidationErrors) { +func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) { var lhs, rhs *value.List w.derefList("lhs: ", w.lhs, &lhs) w.derefList("rhs: ", w.rhs, &rhs) @@ -280,23 +280,15 @@ func (w *mergingWalker) doList(t schema.List) (errs ValidationErrors) { return errs } -func (w *mergingWalker) visitMapItems(t schema.Map, lhs, rhs *value.Map) (errs ValidationErrors) { +func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs *value.Map) (errs ValidationErrors) { out := &value.Map{} - fieldTypes := map[string]schema.TypeRef{} - for i := range t.Fields { - // I don't want to use the loop variable since a reference - // might outlive the loop iteration (in an error message). 
- f := t.Fields[i] - fieldTypes[f.Name] = f.Type - } - if lhs != nil { for i := range lhs.Items { litem := &lhs.Items[i] fieldType := t.ElementType - if ft, ok := fieldTypes[litem.Name]; ok { - fieldType = ft + if sf, ok := t.FindField(litem.Name); ok { + fieldType = sf.Type } w2 := w.prepareDescent(fieldpath.PathElement{FieldName: &litem.Name}, fieldType) w2.lhs = &litem.Value @@ -324,8 +316,8 @@ func (w *mergingWalker) visitMapItems(t schema.Map, lhs, rhs *value.Map) (errs V } fieldType := t.ElementType - if ft, ok := fieldTypes[ritem.Name]; ok { - fieldType = ft + if sf, ok := t.FindField(ritem.Name); ok { + fieldType = sf.Type } w2 := w.prepareDescent(fieldpath.PathElement{FieldName: &ritem.Name}, fieldType) w2.rhs = &ritem.Value @@ -344,7 +336,7 @@ func (w *mergingWalker) visitMapItems(t schema.Map, lhs, rhs *value.Map) (errs V return errs } -func (w *mergingWalker) doMap(t schema.Map) (errs ValidationErrors) { +func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) { var lhs, rhs *value.Map w.derefMap("lhs: ", w.lhs, &lhs) w.derefMap("rhs: ", w.rhs, &rhs) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go index 32e4b18b136..a6b748411fb 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go @@ -38,9 +38,9 @@ func removeItemsWithSchema(value *value.Value, toRemove *fieldpath.Set, schema * // will be a descent. It modifies w.inLeaf. func (w *removingWalker) doLeaf() ValidationErrors { return nil } -func (w *removingWalker) doScalar(t schema.Scalar) ValidationErrors { return nil } +func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { return nil } -func (w *removingWalker) doList(t schema.List) (errs ValidationErrors) { +func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { l := w.value.ListValue // If list is null, empty, or atomic just return @@ -70,7 +70,7 @@ func (w *removingWalker) doList(t schema.List) (errs ValidationErrors) { return nil } -func (w *removingWalker) doMap(t schema.Map) ValidationErrors { +func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { m := w.value.MapValue // If map is null, empty, or atomic just return diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go index 5db5018e5a1..9c61b84505b 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go @@ -18,6 +18,7 @@ package typed import ( "fmt" + "strings" "sync" "sigs.k8s.io/structured-merge-diff/fieldpath" @@ -116,7 +117,7 @@ func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - c.Merged, err = merge(&tv, rhs, func(w *mergingWalker) { + _, err = merge(&tv, rhs, func(w *mergingWalker) { if w.lhs == nil { c.Added.Insert(w.path) } else if w.rhs == nil { @@ -126,8 +127,6 @@ func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { // Need to implement equality check on the value type. 
c.Modified.Insert(w.path) } - - ruleKeepRHS(w) }, func(w *mergingWalker) { if w.lhs == nil { c.Added.Insert(w.path) @@ -268,10 +267,6 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) // No field will appear in more than one of the three fieldsets. If all of the // fieldsets are empty, then the objects must have been equal. type Comparison struct { - // Merged is the result of merging the two objects, as explained in the - // comments on TypedValue.Merge(). - Merged *TypedValue - // Removed contains any fields removed by rhs (the right-hand-side // object in the comparison). Removed *fieldpath.Set @@ -289,15 +284,15 @@ func (c *Comparison) IsSame() bool { // String returns a human readable version of the comparison. func (c *Comparison) String() string { - str := fmt.Sprintf("- Merged Object:\n%v\n", c.Merged.AsValue()) + bld := strings.Builder{} if !c.Modified.Empty() { - str += fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified) + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) } if !c.Added.Empty() { - str += fmt.Sprintf("- Added Fields:\n%v\n", c.Added) + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) } if !c.Removed.Empty() { - str += fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed) + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) } - return str + return bld.String() } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go index 0a763247308..338c058310b 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go @@ -130,7 +130,7 @@ func (v *validatingObjectWalker) doNode() { } } -func (v *validatingObjectWalker) doScalar(t schema.Scalar) ValidationErrors { +func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors { if errs := v.validateScalar(t, &v.value, ""); len(errs) > 0 { return errs } @@ -141,8 +141,8 @@ func (v *validatingObjectWalker) doScalar(t schema.Scalar) ValidationErrors { return nil } -func (v *validatingObjectWalker) visitListItems(t schema.List, list *value.List) (errs ValidationErrors) { - observedKeys := map[string]struct{}{} +func (v *validatingObjectWalker) visitListItems(t *schema.List, list *value.List) (errs ValidationErrors) { + observedKeys := fieldpath.MakePathElementSet(len(list.Items)) for i, child := range list.Items { pe, err := listItemToPathElement(t, i, child) if err != nil { @@ -152,11 +152,10 @@ func (v *validatingObjectWalker) visitListItems(t schema.List, list *value.List) // this element. continue } - keyStr := pe.String() - if _, found := observedKeys[keyStr]; found { - errs = append(errs, v.errorf("duplicate entries for key %v", keyStr)...) + if observedKeys.Has(pe) { + errs = append(errs, v.errorf("duplicate entries for key %v", pe.String())...) } - observedKeys[keyStr] = struct{}{} + observedKeys.Insert(pe) v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.validate()...) 
@@ -167,7 +166,7 @@ func (v *validatingObjectWalker) visitListItems(t schema.List, list *value.List) return errs } -func (v *validatingObjectWalker) doList(t schema.List) (errs ValidationErrors) { +func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) { list, err := listValue(v.value) if err != nil { return v.error(err) @@ -186,21 +185,13 @@ func (v *validatingObjectWalker) doList(t schema.List) (errs ValidationErrors) { return errs } -func (v *validatingObjectWalker) visitMapItems(t schema.Map, m *value.Map) (errs ValidationErrors) { - fieldTypes := map[string]schema.TypeRef{} - for i := range t.Fields { - // I don't want to use the loop variable since a reference - // might outlive the loop iteration (in an error message). - f := t.Fields[i] - fieldTypes[f.Name] = f.Type - } - +func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m *value.Map) (errs ValidationErrors) { for i := range m.Items { item := &m.Items[i] pe := fieldpath.PathElement{FieldName: &item.Name} - if tr, ok := fieldTypes[item.Name]; ok { - v2 := v.prepareDescent(pe, tr) + if sf, ok := t.FindField(item.Name); ok { + v2 := v.prepareDescent(pe, sf.Type) v2.value = item.Value errs = append(errs, v2.validate()...) v.finishDescent(v2) @@ -215,7 +206,7 @@ func (v *validatingObjectWalker) visitMapItems(t schema.Map, m *value.Map) (errs return errs } -func (v *validatingObjectWalker) doMap(t schema.Map) (errs ValidationErrors) { +func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) { m, err := mapValue(v.value) if err != nil { return v.error(err) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/value/value.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/value/value.go index 1ce63e1c970..942d797246a 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/value/value.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/value/value.go @@ -36,86 +36,146 @@ type Value struct { // Equals returns true iff the two values are equal. func (v Value) Equals(rhs Value) bool { - return !v.Less(rhs) && !rhs.Less(v) + if v.FloatValue != nil || rhs.FloatValue != nil { + var lf float64 + if v.FloatValue != nil { + lf = float64(*v.FloatValue) + } else if v.IntValue != nil { + lf = float64(*v.IntValue) + } else { + return false + } + var rf float64 + if rhs.FloatValue != nil { + rf = float64(*rhs.FloatValue) + } else if rhs.IntValue != nil { + rf = float64(*rhs.IntValue) + } else { + return false + } + return lf == rf + } + if v.IntValue != nil { + if rhs.IntValue != nil { + return *v.IntValue == *rhs.IntValue + } + return false + } + if v.StringValue != nil { + if rhs.StringValue != nil { + return *v.StringValue == *rhs.StringValue + } + return false + } + if v.BooleanValue != nil { + if rhs.BooleanValue != nil { + return *v.BooleanValue == *rhs.BooleanValue + } + return false + } + if v.ListValue != nil { + if rhs.ListValue != nil { + return v.ListValue.Equals(rhs.ListValue) + } + return false + } + if v.MapValue != nil { + if rhs.MapValue != nil { + return v.MapValue.Equals(rhs.MapValue) + } + return false + } + if v.Null { + if rhs.Null { + return true + } + return false + } + // No field is set, on either objects. + return true } // Less provides a total ordering for Value (so that they can be sorted, even // if they are of different types). 
func (v Value) Less(rhs Value) bool { + return v.Compare(rhs) == -1 +} + +// Compare provides a total ordering for Value (so that they can be +// sorted, even if they are of different types). The result will be 0 if +// v==rhs, -1 if v < rhs, and +1 if v > rhs. +func (v Value) Compare(rhs Value) int { if v.FloatValue != nil { if rhs.FloatValue == nil { // Extra: compare floats and ints numerically. if rhs.IntValue != nil { - return float64(*v.FloatValue) < float64(*rhs.IntValue) + return v.FloatValue.Compare(Float(*rhs.IntValue)) } - return true + return -1 } - return *v.FloatValue < *rhs.FloatValue + return v.FloatValue.Compare(*rhs.FloatValue) } else if rhs.FloatValue != nil { // Extra: compare floats and ints numerically. if v.IntValue != nil { - return float64(*v.IntValue) < float64(*rhs.FloatValue) + return Float(*v.IntValue).Compare(*rhs.FloatValue) } - return false + return 1 } if v.IntValue != nil { if rhs.IntValue == nil { - return true + return -1 } - return *v.IntValue < *rhs.IntValue + return v.IntValue.Compare(*rhs.IntValue) } else if rhs.IntValue != nil { - return false + return 1 } if v.StringValue != nil { if rhs.StringValue == nil { - return true + return -1 } - return *v.StringValue < *rhs.StringValue + return strings.Compare(string(*v.StringValue), string(*rhs.StringValue)) } else if rhs.StringValue != nil { - return false + return 1 } if v.BooleanValue != nil { if rhs.BooleanValue == nil { - return true + return -1 } - if *v.BooleanValue == *rhs.BooleanValue { - return false - } - return *v.BooleanValue == false + return v.BooleanValue.Compare(*rhs.BooleanValue) } else if rhs.BooleanValue != nil { - return false + return 1 } if v.ListValue != nil { if rhs.ListValue == nil { - return true + return -1 } - return v.ListValue.Less(rhs.ListValue) + return v.ListValue.Compare(rhs.ListValue) } else if rhs.ListValue != nil { - return false + return 1 } if v.MapValue != nil { if rhs.MapValue == nil { - return true + return -1 } - return v.MapValue.Less(rhs.MapValue) + return v.MapValue.Compare(rhs.MapValue) } else if rhs.MapValue != nil { - return false + return 1 } if v.Null { if !rhs.Null { - return true + return -1 } - return false + return 0 } else if rhs.Null { - return false + return 1 } // Invalid Value-- nothing is set. - return false + return 0 } type Int int64 @@ -123,40 +183,141 @@ type Float float64 type String string type Boolean bool +// Compare compares integers. The result will be 0 if i==rhs, -1 if i < +// rhs, and +1 if i > rhs. +func (i Int) Compare(rhs Int) int { + if i > rhs { + return 1 + } else if i < rhs { + return -1 + } + return 0 +} + +// Compare compares floats. The result will be 0 if f==rhs, -1 if f < +// rhs, and +1 if f > rhs. +func (f Float) Compare(rhs Float) int { + if f > rhs { + return 1 + } else if f < rhs { + return -1 + } + return 0 +} + +// Compare compares booleans. The result will be 0 if b==rhs, -1 if b < +// rhs, and +1 if b > rhs. +func (b Boolean) Compare(rhs Boolean) int { + if b == rhs { + return 0 + } else if b == false { + return -1 + } + return 1 +} + // Field is an individual key-value pair. type Field struct { Name string Value Value } +// FieldList is a list of key-value pairs. Each field is expected to +// have a different name. +type FieldList []Field + +// Sort sorts the field list by Name. 
+func (f FieldList) Sort() { + if len(f) < 2 { + return + } + if len(f) == 2 { + if f[1].Name < f[0].Name { + f[0], f[1] = f[1], f[0] + } + return + } + sort.SliceStable(f, func(i, j int) bool { + return f[i].Name < f[j].Name + }) +} + +// Less compares two lists lexically. +func (f FieldList) Less(rhs FieldList) bool { + return f.Compare(rhs) == -1 +} + +// Less compares two lists lexically. The result will be 0 if f==rhs, -1 +// if f < rhs, and +1 if f > rhs. +func (f FieldList) Compare(rhs FieldList) int { + i := 0 + for { + if i >= len(f) && i >= len(rhs) { + // Maps are the same length and all items are equal. + return 0 + } + if i >= len(f) { + // F is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 { + return c + } + if c := f[i].Value.Compare(rhs[i].Value); c != 0 { + return c + } + // The items are equal; continue. + i++ + } +} + // List is a list of items. type List struct { Items []Value } +// Equals compares two lists lexically. +func (l *List) Equals(rhs *List) bool { + if len(l.Items) != len(rhs.Items) { + return false + } + + for i, lv := range l.Items { + if !lv.Equals(rhs.Items[i]) { + return false + } + } + return true +} + // Less compares two lists lexically. func (l *List) Less(rhs *List) bool { + return l.Compare(rhs) == -1 +} + +// Compare compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func (l *List) Compare(rhs *List) int { i := 0 for { if i >= len(l.Items) && i >= len(rhs.Items) { // Lists are the same length and all items are equal. - return false + return 0 } if i >= len(l.Items) { // LHS is shorter. - return true + return -1 } if i >= len(rhs.Items) { // RHS is shorter. - return false - } - if l.Items[i].Less(rhs.Items[i]) { - // LHS is less; return - return true + return 1 } - if rhs.Items[i].Less(l.Items[i]) { - // RHS is less; return - return false + if c := l.Items[i].Compare(rhs.Items[i]); c != 0 { + return c } // The items are equal; continue. i++ @@ -191,8 +352,30 @@ func (m *Map) computeOrder() []int { return m.order } +// Equals compares two maps lexically. +func (m *Map) Equals(rhs *Map) bool { + if len(m.Items) != len(rhs.Items) { + return false + } + for _, lfield := range m.Items { + rfield, ok := rhs.Get(lfield.Name) + if !ok { + return false + } + if !lfield.Value.Equals(rfield.Value) { + return false + } + } + return true +} + // Less compares two maps lexically. func (m *Map) Less(rhs *Map) bool { + return m.Compare(rhs) == -1 +} + +// Compare compares two maps lexically. +func (m *Map) Compare(rhs *Map) int { var noAllocL, noAllocR [2]int var morder, rorder []int @@ -238,28 +421,22 @@ func (m *Map) Less(rhs *Map) bool { for { if i >= len(morder) && i >= len(rorder) { // Maps are the same length and all items are equal. - return false + return 0 } if i >= len(morder) { // LHS is shorter. - return true + return -1 } if i >= len(rorder) { // RHS is shorter. - return false + return 1 } fa, fb := &m.Items[morder[i]], &rhs.Items[rorder[i]] - if fa.Name != fb.Name { - // the map having the field name that sorts lexically less is "less" - return fa.Name < fb.Name + if c := strings.Compare(fa.Name, fb.Name); c != 0 { + return c } - if fa.Value.Less(fb.Value) { - // LHS is less; return - return true - } - if fb.Value.Less(fa.Value) { - // RHS is less; return - return false + if c := fa.Value.Compare(fb.Value); c != 0 { + return c } // The items are equal; continue. 
i++ From b4c8bbb12c39ad73fc2e1abb90ed922941428842 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 30 Oct 2019 22:23:47 +0100 Subject: [PATCH 3/9] Fixes around metrics/ handler --- .../cloudprovider/gce/gce_metrics.go | 9 +- cluster-autoscaler/main.go | 4 +- cluster-autoscaler/metrics/metrics.go | 119 +++++++++--------- 3 files changed, 67 insertions(+), 65 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/gce/gce_metrics.go b/cluster-autoscaler/cloudprovider/gce/gce_metrics.go index 2aeb9e3a1ac..a714c70418f 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_metrics.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_metrics.go @@ -17,7 +17,8 @@ limitations under the License. package gce import ( - "github.com/prometheus/client_golang/prometheus" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) const ( @@ -26,8 +27,8 @@ const ( var ( /**** Metrics related to GCE API usage ****/ - requestCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ + requestCounter = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "gce_request_count", Help: "Counter of GCE API requests for each verb and API resource.", @@ -37,7 +38,7 @@ var ( // RegisterMetrics registers all GCE metrics. func RegisterMetrics() { - prometheus.MustRegister(requestCounter) + legacyregistry.MustRegister(requestCounter) } // registerRequest registers request to GCE API. diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index 1bc403d9cf3..90ca52a9a65 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -29,7 +29,6 @@ import ( "syscall" "time" - "github.com/prometheus/client_golang/prometheus" "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,6 +52,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" kube_flag "k8s.io/component-base/cli/flag" componentbaseconfig "k8s.io/component-base/config" + "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" "k8s.io/kubernetes/pkg/client/leaderelectionconfig" ) @@ -368,7 +368,7 @@ func main() { klog.V(1).Infof("Cluster Autoscaler %s", version.ClusterAutoscalerVersion) go func() { - http.Handle("/metrics", prometheus.Handler()) + http.Handle("/metrics", legacyregistry.Handler()) http.Handle("/health-check", healthCheck) err := http.ListenAndServe(*address, nil) klog.Fatalf("Failed to start metrics: %v", err) diff --git a/cluster-autoscaler/metrics/metrics.go b/cluster-autoscaler/metrics/metrics.go index 2e895016537..44621aba34b 100644 --- a/cluster-autoscaler/metrics/metrics.go +++ b/cluster-autoscaler/metrics/metrics.go @@ -21,9 +21,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" - _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client-go metrics registration + _ "k8s.io/component-base/metrics/prometheus/restclient" // for client-go metrics registration - "github.com/prometheus/client_golang/prometheus" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" ) @@ -92,32 +93,32 @@ const ( var ( /**** Metrics related to cluster state ****/ - clusterSafeToAutoscale = prometheus.NewGauge( - prometheus.GaugeOpts{ + clusterSafeToAutoscale = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "cluster_safe_to_autoscale", Help: "Whether or not cluster is healthy enough for autoscaling. 
1 if it is, 0 otherwise.", }, ) - nodesCount = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + nodesCount = k8smetrics.NewGaugeVec( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "nodes_count", Help: "Number of nodes in cluster.", }, []string{"state"}, ) - nodeGroupsCount = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + nodeGroupsCount = k8smetrics.NewGaugeVec( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "node_groups_count", Help: "Number of node groups managed by CA.", }, []string{"node_group_type"}, ) - unschedulablePodsCount = prometheus.NewGauge( - prometheus.GaugeOpts{ + unschedulablePodsCount = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "unschedulable_pods_count", Help: "Number of unschedulable pods in the cluster.", @@ -125,16 +126,16 @@ var ( ) /**** Metrics related to autoscaler execution ****/ - lastActivity = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + lastActivity = k8smetrics.NewGaugeVec( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "last_activity", Help: "Last time certain part of CA logic executed.", }, []string{"activity"}, ) - functionDuration = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + functionDuration = k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ Namespace: caNamespace, Name: "function_duration_seconds", Help: "Time taken by various parts of CA main loop.", @@ -142,8 +143,8 @@ var ( }, []string{"function"}, ) - functionDurationSummary = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ + functionDurationSummary = k8smetrics.NewSummaryVec( + &k8smetrics.SummaryOpts{ Namespace: caNamespace, Name: "function_duration_quantile_seconds", Help: "Quantiles of time taken by various parts of CA main loop.", @@ -152,72 +153,72 @@ var ( ) /**** Metrics related to autoscaler operations ****/ - errorsCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ + errorsCount = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "errors_total", Help: "The number of CA loops failed due to an error.", }, []string{"type"}, ) - scaleUpCount = prometheus.NewCounter( - prometheus.CounterOpts{ + scaleUpCount = k8smetrics.NewCounter( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "scaled_up_nodes_total", Help: "Number of nodes added by CA.", }, ) - gpuScaleUpCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ + gpuScaleUpCount = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "scaled_up_gpu_nodes_total", Help: "Number of GPU nodes added by CA, by GPU name.", }, []string{"gpu_name"}, ) - failedScaleUpCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ + failedScaleUpCount = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "failed_scale_ups_total", Help: "Number of times scale-up operation has failed.", }, []string{"reason"}, ) - scaleDownCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ + scaleDownCount = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "scaled_down_nodes_total", Help: "Number of nodes removed by CA.", }, []string{"reason"}, ) - gpuScaleDownCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ + gpuScaleDownCount = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "scaled_down_gpu_nodes_total", Help: "Number of GPU nodes removed by CA, by reason and GPU name.", }, []string{"reason", "gpu_name"}, ) - evictionsCount = prometheus.NewCounter( - prometheus.CounterOpts{ + 
evictionsCount = k8smetrics.NewCounter( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "evicted_pods_total", Help: "Number of pods evicted by CA", }, ) - unneededNodesCount = prometheus.NewGauge( - prometheus.GaugeOpts{ + unneededNodesCount = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "unneeded_nodes_count", Help: "Number of nodes currently considered unneeded by CA.", }, ) - scaleDownInCooldown = prometheus.NewGauge( - prometheus.GaugeOpts{ + scaleDownInCooldown = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "scale_down_in_cooldown", Help: "Whether or not the scale down is in cooldown. 1 if its, 0 otherwise.", @@ -225,24 +226,24 @@ var ( ) /**** Metrics related to NodeAutoprovisioning ****/ - napEnabled = prometheus.NewGauge( - prometheus.GaugeOpts{ + napEnabled = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ Namespace: caNamespace, Name: "nap_enabled", Help: "Whether or not Node Autoprovisioning is enabled. 1 if it is, 0 otherwise.", }, ) - nodeGroupCreationCount = prometheus.NewCounter( - prometheus.CounterOpts{ + nodeGroupCreationCount = k8smetrics.NewCounter( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "created_node_groups_total", Help: "Number of node groups created by Node Autoprovisioning.", }, ) - nodeGroupDeletionCount = prometheus.NewCounter( - prometheus.CounterOpts{ + nodeGroupDeletionCount = k8smetrics.NewCounter( + &k8smetrics.CounterOpts{ Namespace: caNamespace, Name: "deleted_node_groups_total", Help: "Number of node groups deleted by Node Autoprovisioning.", @@ -252,25 +253,25 @@ var ( // RegisterAll registers all metrics. func RegisterAll() { - prometheus.MustRegister(clusterSafeToAutoscale) - prometheus.MustRegister(nodesCount) - prometheus.MustRegister(nodeGroupsCount) - prometheus.MustRegister(unschedulablePodsCount) - prometheus.MustRegister(lastActivity) - prometheus.MustRegister(functionDuration) - prometheus.MustRegister(functionDurationSummary) - prometheus.MustRegister(errorsCount) - prometheus.MustRegister(scaleUpCount) - prometheus.MustRegister(gpuScaleUpCount) - prometheus.MustRegister(failedScaleUpCount) - prometheus.MustRegister(scaleDownCount) - prometheus.MustRegister(gpuScaleDownCount) - prometheus.MustRegister(evictionsCount) - prometheus.MustRegister(unneededNodesCount) - prometheus.MustRegister(scaleDownInCooldown) - prometheus.MustRegister(napEnabled) - prometheus.MustRegister(nodeGroupCreationCount) - prometheus.MustRegister(nodeGroupDeletionCount) + legacyregistry.MustRegister(clusterSafeToAutoscale) + legacyregistry.MustRegister(nodesCount) + legacyregistry.MustRegister(nodeGroupsCount) + legacyregistry.MustRegister(unschedulablePodsCount) + legacyregistry.MustRegister(lastActivity) + legacyregistry.MustRegister(functionDuration) + legacyregistry.MustRegister(functionDurationSummary) + legacyregistry.MustRegister(errorsCount) + legacyregistry.MustRegister(scaleUpCount) + legacyregistry.MustRegister(gpuScaleUpCount) + legacyregistry.MustRegister(failedScaleUpCount) + legacyregistry.MustRegister(scaleDownCount) + legacyregistry.MustRegister(gpuScaleDownCount) + legacyregistry.MustRegister(evictionsCount) + legacyregistry.MustRegister(unneededNodesCount) + legacyregistry.MustRegister(scaleDownInCooldown) + legacyregistry.MustRegister(napEnabled) + legacyregistry.MustRegister(nodeGroupCreationCount) + legacyregistry.MustRegister(nodeGroupDeletionCount) } // UpdateDurationFromStart records the duration of the step identified by the From 
3470a9f1c072637bd9f94c6125c6199a3d9e6626 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 30 Oct 2019 22:14:25 +0100 Subject: [PATCH 4/9] Migrate scheduler creation to scheduler.New() --- cluster-autoscaler/simulator/cluster.go | 2 +- cluster-autoscaler/simulator/predicates.go | 103 ++++++++------------- 2 files changed, 40 insertions(+), 65 deletions(-) diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go index 0e82e29584e..f1f5e51f127 100644 --- a/cluster-autoscaler/simulator/cluster.go +++ b/cluster-autoscaler/simulator/cluster.go @@ -233,7 +233,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no loggingQuota := glogx.PodsLoggingQuota() - tryNodeForPod := func(nodename string, pod *apiv1.Pod, predicateMeta predicates.PredicateMetadata) bool { + tryNodeForPod := func(nodename string, pod *apiv1.Pod, predicateMeta predicates.Metadata) bool { nodeInfo, found := newNodeInfos[nodename] if found { if nodeInfo.Node() == nil { diff --git a/cluster-autoscaler/simulator/predicates.go b/cluster-autoscaler/simulator/predicates.go index bb72d3fbc31..736ab53dae1 100644 --- a/cluster-autoscaler/simulator/predicates.go +++ b/cluster-autoscaler/simulator/predicates.go @@ -21,22 +21,23 @@ import ( "strings" "sync" + kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" - informers "k8s.io/client-go/informers" + "k8s.io/client-go/informers" kube_client "k8s.io/client-go/kubernetes" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "k8s.io/kubernetes/pkg/scheduler/factory" + schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" + schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - + schedulersnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" // We need to import provider to initialize default scheduler. "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" - - "k8s.io/klog" ) const ( @@ -61,7 +62,7 @@ type PredicateInfo struct { // PredicateChecker checks whether all required predicates pass for given Pod and Node. type PredicateChecker struct { predicates []PredicateInfo - predicateMetadataProducer predicates.PredicateMetadataProducer + predicateMetadataProducer predicates.MetadataProducer enableAffinityPredicate bool } @@ -111,53 +112,23 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{}) staticInitIfNeeded() informerFactory := informers.NewSharedInformerFactory(kubeClient, 0) - algorithmProvider := factory.DefaultProvider - - // Set up the configurator which can create schedulers from configs. 
- nodeInformer := informerFactory.Core().V1().Nodes() podInformer := informerFactory.Core().V1().Pods() - pvInformer := informerFactory.Core().V1().PersistentVolumes() - pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims() - replicationControllerInformer := informerFactory.Core().V1().ReplicationControllers() - replicaSetInformer := informerFactory.Apps().V1().ReplicaSets() - statefulSetInformer := informerFactory.Apps().V1().StatefulSets() - serviceInformer := informerFactory.Core().V1().Services() - pdbInformer := informerFactory.Policy().V1beta1().PodDisruptionBudgets() - storageClassInformer := informerFactory.Storage().V1().StorageClasses() - csiNodeInformer := informerFactory.Storage().V1beta1().CSINodes() - - configurator := factory.NewConfigFactory(&factory.ConfigFactoryArgs{ - Client: kubeClient, - NodeInformer: nodeInformer, - PodInformer: podInformer, - PvInformer: pvInformer, - PvcInformer: pvcInformer, - ReplicationControllerInformer: replicationControllerInformer, - ReplicaSetInformer: replicaSetInformer, - StatefulSetInformer: statefulSetInformer, - ServiceInformer: serviceInformer, - PdbInformer: pdbInformer, - StorageClassInformer: storageClassInformer, - HardPodAffinitySymmetricWeight: apiv1.DefaultHardPodAffinitySymmetricWeight, - DisablePreemption: false, - PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore, - BindTimeoutSeconds: scheduler.BindTimeoutSeconds, - }) - // Create the config from a named algorithm provider. - config, err := configurator.CreateFromProvider(algorithmProvider) - if err != nil { - return nil, fmt.Errorf("couldn't create scheduler using provider %q: %v", algorithmProvider, err) + defaultProviderName := schedulerconfig.SchedulerDefaultProviderName + algorithmSource := schedulerconfig.SchedulerAlgorithmSource{ + Provider: &defaultProviderName, } - // Additional tweaks to the config produced by the configurator. - config.Recorder = NoOpEventRecorder{} - config.DisablePreemption = false - config.StopEverything = stop - // Create the scheduler. - sched := scheduler.NewFromConfig(config) - - scheduler.AddAllEventHandlers(sched, apiv1.DefaultSchedulerName, - nodeInformer, podInformer, pvInformer, pvcInformer, serviceInformer, storageClassInformer, csiNodeInformer) + sched, err := scheduler.New( + kubeClient, + informerFactory, + podInformer, + NoOpEventRecorder{}, + stop, + scheduler.WithAlgorithmSource(algorithmSource), + ) + if err != nil { + return nil, fmt.Errorf("couldn't create scheduler; %v", err) + } predicateMap := map[string]predicates.FitPredicate{} for predicateName, predicateFunc := range sched.Algorithm.Predicates() { @@ -192,20 +163,15 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{}) informerFactory.Start(stop) - metadataProducer, err := configurator.GetPredicateMetadataProducer() - if err != nil { - return nil, fmt.Errorf("could not obtain predicateMetadataProducer; %v", err.Error()) - } - return &PredicateChecker{ predicates: predicateList, - predicateMetadataProducer: metadataProducer, + predicateMetadataProducer: sched.Algorithm.PredicateMetadataProducer(), enableAffinityPredicate: true, }, nil } // IsNodeReadyAndSchedulablePredicate checks if node is ready. 
-func IsNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, +func IsNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) { ready := kube_util.IsNodeReadyAndSchedulable(nodeInfo.Node()) if !ready { @@ -221,7 +187,7 @@ func NewTestPredicateChecker() *PredicateChecker { {Name: "default", Predicate: predicates.GeneralPredicates}, {Name: "ready", Predicate: IsNodeReadyAndSchedulablePredicate}, }, - predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata { + predicateMetadataProducer: func(_ *apiv1.Pod, _ schedulerlisters.SharedLister) predicates.Metadata { return nil }, } @@ -232,7 +198,7 @@ func NewTestPredicateChecker() *PredicateChecker { func NewCustomTestPredicateChecker(predicateInfos []PredicateInfo) *PredicateChecker { return &PredicateChecker{ predicates: predicateInfos, - predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata { + predicateMetadataProducer: func(_ *apiv1.Pod, _ schedulerlisters.SharedLister) predicates.Metadata { return nil }, } @@ -256,12 +222,21 @@ func (p *PredicateChecker) IsAffinityPredicateEnabled() bool { // improve the performance of running predicates, especially MatchInterPodAffinity predicate. However, calculating // predicateMetadata is also quite expensive, so it's not always the best option to run this method. // Please refer to https://github.com/kubernetes/autoscaler/issues/257 for more details. -func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata { +func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) predicates.Metadata { // Skip precomputation if affinity predicate is disabled - it's not worth it performance-wise. if !p.enableAffinityPredicate { return nil } - return p.predicateMetadataProducer(pod, nodeInfos) + nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfos)) + for _, v := range nodeInfos { + nodeInfoList = append(nodeInfoList, v) + } + + snapshot := &schedulersnapshot.Snapshot{ + NodeInfoMap: nodeInfos, + NodeInfoList: nodeInfoList, + } + return p.predicateMetadataProducer(pod, snapshot) } // FitsAny checks if the given pod can be place on any of the given nodes. @@ -352,7 +327,7 @@ func (pe *PredicateError) PredicateName() string { // it was calculated using NodeInfo map representing different cluster state and the // performance gains of CheckPredicates won't always offset the cost of GetPredicateMetadata. // Alternatively you can pass nil as predicateMetadata. -func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError { +func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError { for _, predInfo := range p.predicates { // Skip affinity predicate if it has been disabled. 
if !p.enableAffinityPredicate && predInfo.Name == affinityPredicateName { From 889ea1e4b87c304288e1f1e733cdbc45414b9ec5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 13 Nov 2019 16:54:36 +0100 Subject: [PATCH 5/9] Use snapshot.NewSnapshot --- cluster-autoscaler/simulator/predicates.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/cluster-autoscaler/simulator/predicates.go b/cluster-autoscaler/simulator/predicates.go index 736ab53dae1..0758c643fe5 100644 --- a/cluster-autoscaler/simulator/predicates.go +++ b/cluster-autoscaler/simulator/predicates.go @@ -227,16 +227,7 @@ func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[st if !p.enableAffinityPredicate { return nil } - nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfos)) - for _, v := range nodeInfos { - nodeInfoList = append(nodeInfoList, v) - } - - snapshot := &schedulersnapshot.Snapshot{ - NodeInfoMap: nodeInfos, - NodeInfoList: nodeInfoList, - } - return p.predicateMetadataProducer(pod, snapshot) + return p.predicateMetadataProducer(pod, schedulersnapshot.NewSnapshot(nodeInfos)) } // FitsAny checks if the given pod can be place on any of the given nodes. From f50d9ecce218b1ab31c80ad0d26b1486e3e51007 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 30 Oct 2019 22:21:33 +0100 Subject: [PATCH 6/9] Use MoreImportantPod instead GetPodPriority --- cluster-autoscaler/core/filter_out_schedulable.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-autoscaler/core/filter_out_schedulable.go b/cluster-autoscaler/core/filter_out_schedulable.go index aaf9c6da684..841aec1e90a 100644 --- a/cluster-autoscaler/core/filter_out_schedulable.go +++ b/cluster-autoscaler/core/filter_out_schedulable.go @@ -108,7 +108,7 @@ func filterOutSchedulableByPacking(unschedulableCandidates []*apiv1.Pod, nodes [ nodeNameToNodeInfo := schedulerutil.CreateNodeNameToInfoMap(nonExpendableScheduled, nodes) sort.Slice(unschedulableCandidates, func(i, j int) bool { - return util.GetPodPriority(unschedulableCandidates[i]) > util.GetPodPriority(unschedulableCandidates[j]) + return util.MoreImportantPod(unschedulableCandidates[i], unschedulableCandidates[j]) }) for _, pod := range unschedulableCandidates { From 1039e93815ab498923f32e9edfc55eb911b4284f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 30 Oct 2019 22:25:52 +0100 Subject: [PATCH 7/9] Use quota.Equals instead of V1Equals --- cluster-autoscaler/cloudprovider/gce/templates_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-autoscaler/cloudprovider/gce/templates_test.go b/cluster-autoscaler/cloudprovider/gce/templates_test.go index f557f093c31..e289dba5ecc 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates_test.go +++ b/cluster-autoscaler/cloudprovider/gce/templates_test.go @@ -614,7 +614,7 @@ func makeResourceList2(cpu int64, memory int64, gpu int64, pods int64) (apiv1.Re func assertEqualResourceLists(t *testing.T, name string, expected, actual apiv1.ResourceList) { t.Helper() - assert.True(t, quota.V1Equals(expected, actual), + assert.True(t, quota.Equals(expected, actual), "%q unequal:\nExpected: %v\nActual: %v", name, stringifyResourceList(expected), stringifyResourceList(actual)) } From 1b6c75f4f9c7213f09a555864ec16286a19de601 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Wed, 30 Oct 2019 22:34:26 +0100 Subject: [PATCH 8/9] Set StartTime in Pod objects used in tests --- 
cluster-autoscaler/utils/test/test_utils.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cluster-autoscaler/utils/test/test_utils.go b/cluster-autoscaler/utils/test/test_utils.go index 0d6604d056e..a7e315c179b 100644 --- a/cluster-autoscaler/utils/test/test_utils.go +++ b/cluster-autoscaler/utils/test/test_utils.go @@ -37,6 +37,7 @@ import ( // BuildTestPod creates a pod with specified resources. func BuildTestPod(name string, cpu int64, mem int64) *apiv1.Pod { + startTime := metav1.Unix(0, 0) pod := &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID(name), @@ -54,6 +55,9 @@ func BuildTestPod(name string, cpu int64, mem int64) *apiv1.Pod { }, }, }, + Status: apiv1.PodStatus{ + StartTime: &startTime, + }, } if cpu >= 0 { From 8161ca6fdb269f36a30624438300b2c7dac3ec77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Fri, 8 Nov 2019 13:53:30 +0100 Subject: [PATCH 9/9] Use taint keys from k8s.io/api/core/v1 module --- .../core/utils/taint_key_set.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/cluster-autoscaler/core/utils/taint_key_set.go b/cluster-autoscaler/core/utils/taint_key_set.go index d97790e0052..d5cb1278199 100644 --- a/cluster-autoscaler/core/utils/taint_key_set.go +++ b/cluster-autoscaler/core/utils/taint_key_set.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + v1 "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) @@ -26,14 +27,14 @@ type TaintKeySet map[string]bool var ( // NodeConditionTaints lists taint keys used as node conditions NodeConditionTaints = TaintKeySet{ - schedulerapi.TaintNodeNotReady: true, - schedulerapi.TaintNodeUnreachable: true, - schedulerapi.TaintNodeUnschedulable: true, - schedulerapi.TaintNodeMemoryPressure: true, - schedulerapi.TaintNodeDiskPressure: true, - schedulerapi.TaintNodeNetworkUnavailable: true, - schedulerapi.TaintNodePIDPressure: true, - schedulerapi.TaintExternalCloudProvider: true, - schedulerapi.TaintNodeShutdown: true, + v1.TaintNodeNotReady: true, + v1.TaintNodeUnreachable: true, + v1.TaintNodeUnschedulable: true, + v1.TaintNodeMemoryPressure: true, + v1.TaintNodeDiskPressure: true, + v1.TaintNodeNetworkUnavailable: true, + v1.TaintNodePIDPressure: true, + schedulerapi.TaintExternalCloudProvider: true, + schedulerapi.TaintNodeShutdown: true, } )
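
The following is a minimal, self-contained sketch of the registration pattern PATCH 3/9 switches to: metrics are defined with k8s.io/component-base/metrics (opts passed by pointer), registered once through legacyregistry.MustRegister, and served by legacyregistry.Handler() in place of prometheus.Handler(). The metric name, namespace, label, and listen address below are illustrative placeholders rather than values taken from the patch.

package main

import (
	"net/http"

	k8smetrics "k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/klog"
)

// exampleRequestCounter mirrors requestCounter in gce_metrics.go after the
// patch: a *CounterOpts literal plus a label list, instead of the Prometheus
// client types used before.
var exampleRequestCounter = k8smetrics.NewCounterVec(
	&k8smetrics.CounterOpts{
		Namespace: "example_app", // illustrative; the real code uses the caNamespace constant
		Name:      "example_request_count",
		Help:      "Counter of example API requests for each verb.",
	}, []string{"verb"},
)

func main() {
	// Register against the global legacy registry, as RegisterMetrics and
	// RegisterAll now do for every cluster-autoscaler metric.
	legacyregistry.MustRegister(exampleRequestCounter)

	exampleRequestCounter.WithLabelValues("get").Inc()

	// Expose the registry the same way main.go does after the patch.
	http.Handle("/metrics", legacyregistry.Handler())
	err := http.ListenAndServe(":8085", nil)
	klog.Fatalf("Failed to start metrics: %v", err)
}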
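
Beyond the scheduler.New() migration, PATCH 4/9 and 5/9 rename the predicate metadata types (PredicateMetadata becomes Metadata, PredicateMetadataProducer becomes MetadataProducer) and change the producer's second argument from a NodeInfo map to a scheduler snapshot. The sketch below condenses that call shape; it relies only on the types and the NewSnapshot constructor that appear in the diffs.

package simulator

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedulersnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

// computeMetadata wraps a predicates.MetadataProducer the way
// GetPredicateMetadata does after PATCH 5/9: the NodeInfo map kept by the
// cluster-autoscaler is turned into a scheduler snapshot (which satisfies the
// shared lister the producer now expects) via NewSnapshot.
func computeMetadata(producer predicates.MetadataProducer, pod *apiv1.Pod,
	nodeInfos map[string]*schedulernodeinfo.NodeInfo) predicates.Metadata {
	return producer(pod, schedulersnapshot.NewSnapshot(nodeInfos))
}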
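
For PATCH 6/9, MoreImportantPod already encapsulates the priority comparison (with a start-time tie-break in the upstream helper) that the previous GetPodPriority expression spelled out by hand, so the sort callback reduces to a single call. A short sketch follows; the scheduler util import path is an assumption, since the diff shows only the util.MoreImportantPod call site.

package core

import (
	"sort"

	apiv1 "k8s.io/api/core/v1"
	// Assumed location of MoreImportantPod; only the call site appears in the patch.
	util "k8s.io/kubernetes/pkg/scheduler/util"
)

// sortByImportance orders pods most-important-first, matching what
// filterOutSchedulableByPacking does after the patch.
func sortByImportance(pods []*apiv1.Pod) {
	sort.Slice(pods, func(i, j int) bool {
		// True when pods[i] should be handled before pods[j].
		return util.MoreImportantPod(pods[i], pods[j])
	})
}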
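
PATCH 7/9 swaps the ResourceList assertion helper to quota.Equals, which compares the two lists value-wise rather than structurally, so equivalent quantities written in different notations still compare equal. A small test-style sketch follows; the quota import path is an assumption, as the diff shows only the call site.

package gce

import (
	"testing"

	"github.com/stretchr/testify/assert"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	// Assumed location of the Equals helper; only quota.Equals appears in the patch.
	quota "k8s.io/kubernetes/pkg/quota/v1"
)

// TestResourceListEquality demonstrates the value-wise comparison: 500m CPU
// expressed two different ways is still reported as equal.
func TestResourceListEquality(t *testing.T) {
	expected := apiv1.ResourceList{apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI)}
	actual := apiv1.ResourceList{apiv1.ResourceCPU: resource.MustParse("500m")}
	assert.True(t, quota.Equals(expected, actual))
}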
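
PATCH 8/9 gives every test pod a non-nil Status.StartTime, presumably so that newer vendored scheduler code which reads pod start times (for example the tie-break behind MoreImportantPod) never dereferences a nil pointer. A reduced sketch of the resulting construction, keeping only the fields relevant to the change:

package test

import (
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// buildMinimalTestPod is an illustrative reduction of BuildTestPod showing the
// fixed, non-nil StartTime added by the patch.
func buildMinimalTestPod(name string) *apiv1.Pod {
	startTime := metav1.Unix(0, 0) // any fixed time works; the point is a non-nil pointer
	return &apiv1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:  types.UID(name),
			Name: name,
		},
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{{Name: "container"}},
		},
		Status: apiv1.PodStatus{
			StartTime: &startTime,
		},
	}
}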
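
Finally, PATCH 9/9 reads the node-condition taint keys from k8s.io/api/core/v1, where they are now published, and keeps only the two keys still defined by the scheduler API package (TaintExternalCloudProvider and TaintNodeShutdown). A small sketch of a lookup over the migrated constants:

package utils

import (
	v1 "k8s.io/api/core/v1"
)

// isNodeConditionTaint reports whether a taint uses one of the condition keys
// that the patch now sources from k8s.io/api/core/v1.
func isNodeConditionTaint(taint v1.Taint) bool {
	switch taint.Key {
	case v1.TaintNodeNotReady,
		v1.TaintNodeUnreachable,
		v1.TaintNodeUnschedulable,
		v1.TaintNodeMemoryPressure,
		v1.TaintNodeDiskPressure,
		v1.TaintNodeNetworkUnavailable,
		v1.TaintNodePIDPressure:
		return true
	}
	return false
}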